/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 *	LICENSE:
 *	Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 *	The computer program files contained in this folder ("Files")
 *	are provided to you under the BSD-type license terms provided
 *	below, and any use of such Files and any derivative works
 *	thereof created by you shall be governed by the following terms
 *	and conditions:
 *
 *	- Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials provided
 *	  with the distribution.
 *	- Neither the name of Marvell nor the names of its contributors
 *	  may be used to endorse or promote products derived from this
 *	  software without specific prior written permission.
 *
 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *	BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 *	OF THE POSSIBILITY OF SUCH DAMAGE.
 *	/LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);

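/*
 * All three are boot-time tunables read from the kernel environment, e.g.
 * set hw.msk.msi_disable="1" in loader.conf(5) to force INTx interrupts or
 * hw.msk.jumbo_disable="1" to skip jumbo frame buffer allocation.
 */
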
#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
	    "D-Link 560SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Extreme",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};

static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(struct ifnet *);
static void msk_start_locked(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	DEVMETHOD(bus_get_dma_tag,	mskc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);

static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1,		  0,	       0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1,		  0,	       0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,	  0,	       RF_ACTIVE | RF_SHAREABLE },
	{ -1,		  0,	       0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ,	  1,	       RF_ACTIVE },
	{ -1,		  0,	       0 }
};

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

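/*
 * Read a PHY register through the GMAC Serial Management Interface: program
 * the PHY and register address with a read opcode, then poll GM_SMI_CTRL
 * until the controller flags valid read data or MSK_TIMEOUT polls elapse.
 */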
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

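/*
 * MII status change callback.  Runs with the interface lock held; it mirrors
 * the media state negotiated by the PHY into the GMAC: speed, duplex and
 * pause settings are forced (auto-update disabled) and Rx/Tx are enabled or
 * disabled according to link state.
 */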
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac = GMC_PAUSE_OFF;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				gmac = GMC_PAUSE_ON;
		}
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}

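/*
 * Program the GMAC Rx filter.  Multicast addresses are mapped into a 64-bit
 * hash: the big-endian CRC32 of each address is reduced to its 6 low bits,
 * which select one bit in the table spread across the four 16-bit
 * GM_MC_ADDR_H1..H4 registers.
 */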
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

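/*
 * Enable or disable hardware VLAN tag stripping/insertion to match the
 * interface's IFCAP_VLAN_HWTAGGING setting.
 */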
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

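/*
 * Post the initial Rx buffers.  When a checksum-start (OP_TCPSTART) list
 * element was queued by the ring-initialization code, wait for the prefetch
 * unit to consume it and then replace the consumed element with a fresh
 * buffer.
 */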
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
	uint16_t idx;
	int i;

	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		/* Wait until controller executes OP_TCPSTART command. */
		for (i = 100; i > 0; i--) {
			DELAY(100);
			idx = CSR_READ_2(sc_if->msk_softc,
			    Y2_PREF_Q_ADDR(sc_if->msk_rxq,
			    PREF_UNIT_GET_IDX_REG));
			if (idx != 0)
				break;
		}
		if (i == 0) {
			device_printf(sc_if->msk_if_dev,
			    "prefetch unit stuck?\n");
			return (ETIMEDOUT);
		}
		/*
		 * Fill consumed LE with free buffer. This can be done
		 * in Rx handler but we don't want to add special code
		 * in Rx handler.
		 */
		if (jumbo > 0) {
			if (msk_jumbo_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			if (msk_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
			    sc_if->msk_cdata.msk_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc_if->msk_cdata.msk_rx_prod = 0;
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_rx_prod);
	}
	return (0);
}

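/*
 * Initialize the standard Rx ring: reset producer/consumer indices, queue
 * an OP_TCPSTART list element when Rx checksum offloading is active, load
 * fresh mbufs, and finally hand the ring to the prefetch unit by writing
 * its put index.
 */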
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, nbuf, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		MSK_INC(prod, MSK_RX_RING_CNT);
	}
	nbuf = MSK_RX_BUF_CNT;
	prod = 0;
	/* Tell the controller how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
#endif
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
		nbuf--;
	}
	for (i = 0; i < nbuf; i++) {
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_RX_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = prod;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
	    MSK_RX_RING_CNT);
	if (msk_rx_fill(sc_if, 0) != 0)
		return (ENOBUFS);
	return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, nbuf, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}
	nbuf = MSK_RX_BUF_CNT;
	prod = 0;
	/* Tell the controller how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
#endif
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
		nbuf--;
	}
	for (i = 0; i < nbuf; i++) {
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = prod;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
	    MSK_JUMBO_RX_RING_CNT);
	if (msk_rx_fill(sc_if, 1) != 0)
		return (ENOBUFS);
	return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_last_csum = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;
	sc_if->msk_cdata.msk_tx_high_addr = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

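/*
 * Requeue a received buffer in place: the list element is rewritten to hand
 * the same mbuf back to the controller, which avoids an allocation when the
 * frame is dropped (e.g. on a short or errored frame).
 */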
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

#ifdef MSK_64BIT_DMA
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_RX_RING_CNT);
#endif
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

#ifdef MSK_64BIT_DMA
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
#endif
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

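/*
 * Allocate and map a fresh Rx mbuf.  The mbuf is loaded into a spare DMA
 * map first; only after a successful load are the descriptor's map and the
 * spare swapped, so the old buffer stays intact if allocation fails.
 */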
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#ifdef MSK_64BIT_DMA
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_RX_RING_CNT);
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
		rxd->rx_m = NULL;
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

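/* Jumbo variant of msk_newbuf() using 9KB clusters from m_getjcl(). */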
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#ifdef MSK_64BIT_DMA
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#endif
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
		rxd->rx_m = NULL;
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int error;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	if ((ifp->if_flags & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}
	mii = device_get_softc(sc_if->msk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	MSK_IF_UNLOCK(sc_if);
}

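/*
 * Handle interface ioctls.  MTU changes beyond ETHERMTU require jumbo
 * support and may force checksum offloading off; capability changes that
 * affect the Rx path on old-style descriptors trigger a reinit.
 */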
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask, reinit;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU) {
				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
					error = EINVAL;
					MSK_IF_UNLOCK(sc_if);
					break;
				}
				if ((sc_if->msk_flags &
				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
					ifp->if_hwassist &=
					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
					ifp->if_capenable &=
					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
					VLAN_CAPABILITIES(ifp);
				}
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				msk_init_locked(sc_if);
			}
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				msk_rxfilter(sc_if);
			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
				msk_init_locked(sc_if);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_rxfilter(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		reinit = 0;
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
				reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			msk_setvlan(sc_if, ifp);
		}
		if (ifp->if_mtu > ETHERMTU &&
		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}
		VLAN_CAPABILITIES(ifp);
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			msk_init_locked(sc_if);
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < nitems(msk_products); i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024.  Tx/Rx RAM buffer sizes of Yukon II must be a multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

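/*
 * Power the PHYs up or down.  Power-up switches the board to VCC, ungates
 * the PCI/core clocks and releases the PHYs from PowerDown/COMA mode;
 * power-down reverses the sequence.  Note that on Yukon XL rev > A1 the
 * clock-gate enable bits are inverted.
 */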
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

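/*
 * Bring the controller into a known state: disable ASF, clear PCI/PEX error
 * bits, power up the PHYs, reset both GMACs, stop timers and descriptor
 * polling, and finally set up and start the status list unit.
 */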
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i, initram;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	initram = 0;
	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE)
		initram++;

	/* Configure timeout values. */
	for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_expcap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pci_get_max_read_req(sc->msk_dev) == 512)
			pci_set_max_read_req(sc->msk_dev, 2048);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * sc->msk_stat_count);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[64];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	msk_sysctl_node(sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if the controller supports
	 * the new descriptor format and the controller is not Yukon XL.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    sc->msk_hw_id != CHIP_ID_YUKON_XL)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes the checksum for short frames. For VLAN tagged
		 * frames this workaround does not work, so disable checksum
		 * offload for VLAN interfaces.
		 */
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if the controller supports the new descriptor format.
		 */
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Disable Rx checksum offloading on controllers that don't use
	 * the new descriptor format, but give the user a chance to enable it.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
		ifp->if_capenable &= ~IFCAP_RXCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
	    mmd->mii_flags);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_mii_data *mmd;
	int error, msic, msir, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
	    "Maximum amount of time to delay interrupts");
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_holdoff", &sc->msk_int_holdoff);

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_expcap = reg;
	} else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = reg;
	} else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme seems to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme A0 could not use store-and-forward
		 * for jumbo frames, so disable Tx checksum
		 * offloading for jumbo frames.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has status LE writeback bug so msk(4)
			 * does not rely on status word of received frame
			 * in msk_rxeof() which in turn disables all
			 * hardware assistance bits reported by the status
			 * word as well as validity of the received frame.
			 * Just pass received frames to upper stack with
			 * minimal test and let upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0 && msic > 0) {
		msir = 1;
		if (pci_alloc_msi(dev, &msir) == 0) {
			if (msir == 1) {
				sc->msk_pflags |= MSK_FLAG_MSI;
				sc->msk_irq_spec = msk_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

1953 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1954 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1955 device_printf(dev, "failed to add child for PORT_A\n");
1959 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1961 device_printf(dev, "failed to allocate memory for "
1962 "ivars of PORT_A\n");
1966 mmd->port = MSK_PORT_A;
1967 mmd->pmd = sc->msk_pmd;
1968 mmd->mii_flags |= MIIF_DOPAUSE;
1969 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1970 mmd->mii_flags |= MIIF_HAVEFIBER;
1971 if (sc->msk_pmd == 'P')
1972 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1973 device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
1975 if (sc->msk_num_port > 1) {
1976 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1977 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1978 device_printf(dev, "failed to add child for PORT_B\n");
		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
		    M_ZERO);
1985 device_printf(dev, "failed to allocate memory for "
1986 "ivars of PORT_B\n");
1990 mmd->port = MSK_PORT_B;
1991 mmd->pmd = sc->msk_pmd;
1992 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1993 mmd->mii_flags |= MIIF_HAVEFIBER;
1994 if (sc->msk_pmd == 'P')
1995 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1996 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
1999 error = bus_generic_attach(dev);
2001 device_printf(dev, "failed to attach port(s)\n");
2005 /* Hook interrupt last to avoid having to lock softc. */
2006 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
2007 INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
2009 device_printf(dev, "couldn't set up interrupt handler\n");
/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
2027 msk_detach(device_t dev)
2029 struct msk_softc *sc;
2030 struct msk_if_softc *sc_if;
2033 sc_if = device_get_softc(dev);
2034 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
2035 ("msk mutex not initialized in msk_detach"));
2038 ifp = sc_if->msk_ifp;
2039 if (device_is_attached(dev)) {
2041 sc_if->msk_flags |= MSK_FLAG_DETACH;
2043 /* Can't hold locks while calling detach. */
2044 MSK_IF_UNLOCK(sc_if);
2045 callout_drain(&sc_if->msk_tick_ch);
2047 ether_ifdetach(ifp);
	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here.  It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 *	device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */
2062 msk_rx_dma_jfree(sc_if);
2063 msk_txrx_dma_free(sc_if);
2064 bus_generic_detach(dev);
2068 sc = sc_if->msk_softc;
2069 sc->msk_if[sc_if->msk_port] = NULL;
2070 MSK_IF_UNLOCK(sc_if);
2076 mskc_detach(device_t dev)
2078 struct msk_softc *sc;
2080 sc = device_get_softc(dev);
2081 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
2083 if (device_is_alive(dev)) {
2084 if (sc->msk_devs[MSK_PORT_A] != NULL) {
2085 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
2087 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
2089 if (sc->msk_devs[MSK_PORT_B] != NULL) {
2090 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
2092 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
2094 bus_generic_detach(dev);
2097 /* Disable all interrupts. */
2098 CSR_WRITE_4(sc, B0_IMSK, 0);
2099 CSR_READ_4(sc, B0_IMSK);
2100 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2101 CSR_READ_4(sc, B0_HWE_IMSK);
2104 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
	/* Put the hardware into reset. */
2107 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2109 msk_status_dma_free(sc);
2111 if (sc->msk_intrhand) {
2112 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
2113 sc->msk_intrhand = NULL;
2115 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
2116 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
2117 pci_release_msi(dev);
2118 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
2119 mtx_destroy(&sc->msk_mtx);
2124 static bus_dma_tag_t
2125 mskc_get_dma_tag(device_t bus, device_t child __unused)
2128 return (bus_get_dma_tag(bus));
struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;
};
static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->msk_busaddr = segs[0].ds_addr;
}
2146 /* Create status DMA region. */
2148 msk_status_dma_alloc(struct msk_softc *sc)
2150 struct msk_dmamap_arg ctx;
	/*
	 * It seems the controller requires the number of status LE
	 * entries to be a power of 2, with a maximum of 4096 entries.
	 * For dual-port controllers, the number of status LE entries
	 * should be large enough to hold both ports' status updates.
	 */
2161 count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
2162 count = imin(4096, roundup2(count, 1024));
2163 sc->msk_stat_count = count;
2164 stat_sz = count * sizeof(struct msk_stat_desc);
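	/*
	 * Worked example (illustrative, not part of the driver): with
	 * the stock ring sizes assumed to be MSK_RX_RING_CNT = 512 and
	 * MSK_TX_RING_CNT = 384, the computation above yields
	 * 3 * 512 + 384 = 1920 entries, which roundup2() takes to 2048
	 * and imin() leaves below the 4096-entry hardware limit.
	 */
#if 0
	int example_count = imin(4096, roundup2(3 * 512 + 384, 1024));
	/* example_count == 2048, a power of 2 as the chip requires */
#endif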
2165 error = bus_dma_tag_create(
2166 bus_get_dma_tag(sc->msk_dev), /* parent */
2167 MSK_STAT_ALIGN, 0, /* alignment, boundary */
2168 BUS_SPACE_MAXADDR, /* lowaddr */
2169 BUS_SPACE_MAXADDR, /* highaddr */
2170 NULL, NULL, /* filter, filterarg */
	    stat_sz,			/* maxsize */
	    1,				/* nsegments */
	    stat_sz,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
2179 "failed to create status DMA tag\n");
2183 /* Allocate DMA'able memory and load the DMA map for status ring. */
2184 error = bus_dmamem_alloc(sc->msk_stat_tag,
2185 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2186 BUS_DMA_ZERO, &sc->msk_stat_map);
2188 device_printf(sc->msk_dev,
2189 "failed to allocate DMA'able memory for status ring\n");
2193 ctx.msk_busaddr = 0;
2194 error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
2195 sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2197 device_printf(sc->msk_dev,
2198 "failed to load DMA'able memory for status ring\n");
2201 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2207 msk_status_dma_free(struct msk_softc *sc)
2210 /* Destroy status block. */
2211 if (sc->msk_stat_tag) {
2212 if (sc->msk_stat_map) {
2213 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2214 if (sc->msk_stat_ring) {
2215 bus_dmamem_free(sc->msk_stat_tag,
2216 sc->msk_stat_ring, sc->msk_stat_map);
2217 sc->msk_stat_ring = NULL;
2219 sc->msk_stat_map = NULL;
2221 bus_dma_tag_destroy(sc->msk_stat_tag);
2222 sc->msk_stat_tag = NULL;
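/*
 * Illustrative sketch (not part of the driver): the busdma teardown
 * order the function above follows -- unload the map, free the memory,
 * then destroy the tag last.  "tag", "map" and "vaddr" are hypothetical.
 */
#if 0
	bus_dmamap_unload(tag, map);		/* 1: drop the DMA mapping */
	bus_dmamem_free(tag, vaddr, map);	/* 2: release the memory */
	bus_dma_tag_destroy(tag);		/* 3: destroy the tag */
#endif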
2227 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2229 struct msk_dmamap_arg ctx;
2230 struct msk_txdesc *txd;
2231 struct msk_rxdesc *rxd;
2235 /* Create parent DMA tag. */
2236 error = bus_dma_tag_create(
2237 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2238 1, 0, /* alignment, boundary */
2239 BUS_SPACE_MAXADDR, /* lowaddr */
2240 BUS_SPACE_MAXADDR, /* highaddr */
2241 NULL, NULL, /* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2250 "failed to create parent DMA tag\n");
2253 /* Create tag for Tx ring. */
2254 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2255 MSK_RING_ALIGN, 0, /* alignment, boundary */
2256 BUS_SPACE_MAXADDR, /* lowaddr */
2257 BUS_SPACE_MAXADDR, /* highaddr */
2258 NULL, NULL, /* filter, filterarg */
	    MSK_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2267 "failed to create Tx ring DMA tag\n");
2271 /* Create tag for Rx ring. */
2272 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2273 MSK_RING_ALIGN, 0, /* alignment, boundary */
2274 BUS_SPACE_MAXADDR, /* lowaddr */
2275 BUS_SPACE_MAXADDR, /* highaddr */
2276 NULL, NULL, /* filter, filterarg */
	    MSK_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2285 "failed to create Rx ring DMA tag\n");
2289 /* Create tag for Tx buffers. */
2290 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2291 1, 0, /* alignment, boundary */
2292 BUS_SPACE_MAXADDR, /* lowaddr */
2293 BUS_SPACE_MAXADDR, /* highaddr */
2294 NULL, NULL, /* filter, filterarg */
2295 MSK_TSO_MAXSIZE, /* maxsize */
2296 MSK_MAXTXSEGS, /* nsegments */
	    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2303 "failed to create Tx DMA tag\n");
2309 * Workaround hardware hang which seems to happen when Rx buffer
2310 * is not aligned on multiple of FIFO word(8 bytes).
2312 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2313 rxalign = MSK_RX_BUF_ALIGN;
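	/*
	 * Illustrative sketch (not part of the driver): busdma honors
	 * the tag alignment requested below, so every loaded Rx buffer
	 * address satisfies a check like this ("segs" is hypothetical).
	 */
#if 0
	KASSERT((segs[0].ds_addr & (MSK_RX_BUF_ALIGN - 1)) == 0,
	    ("Rx buffer not aligned on a FIFO word"));
#endif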
2314 /* Create tag for Rx buffers. */
2315 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2316 rxalign, 0, /* alignment, boundary */
2317 BUS_SPACE_MAXADDR, /* lowaddr */
2318 BUS_SPACE_MAXADDR, /* highaddr */
2319 NULL, NULL, /* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2328 "failed to create Rx DMA tag\n");
2332 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2333 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2334 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2335 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2337 device_printf(sc_if->msk_if_dev,
2338 "failed to allocate DMA'able memory for Tx ring\n");
2342 ctx.msk_busaddr = 0;
2343 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2344 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2345 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2347 device_printf(sc_if->msk_if_dev,
2348 "failed to load DMA'able memory for Tx ring\n");
2351 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2353 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2354 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2355 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2356 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2358 device_printf(sc_if->msk_if_dev,
2359 "failed to allocate DMA'able memory for Rx ring\n");
2363 ctx.msk_busaddr = 0;
2364 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2365 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2366 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2368 device_printf(sc_if->msk_if_dev,
2369 "failed to load DMA'able memory for Rx ring\n");
2372 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2374 /* Create DMA maps for Tx buffers. */
2375 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2376 txd = &sc_if->msk_cdata.msk_txdesc[i];
2378 txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
2383 "failed to create Tx dmamap\n");
2387 /* Create DMA maps for Rx buffers. */
2388 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2389 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2390 device_printf(sc_if->msk_if_dev,
2391 "failed to create spare Rx dmamap\n");
2394 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2395 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2397 rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
2402 "failed to create Rx dmamap\n");
2412 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2414 struct msk_dmamap_arg ctx;
2415 struct msk_rxdesc *jrxd;
2419 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2420 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2421 device_printf(sc_if->msk_if_dev,
2422 "disabling jumbo frame support\n");
2425 /* Create tag for jumbo Rx ring. */
2426 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2427 MSK_RING_ALIGN, 0, /* alignment, boundary */
2428 BUS_SPACE_MAXADDR, /* lowaddr */
2429 BUS_SPACE_MAXADDR, /* highaddr */
2430 NULL, NULL, /* filter, filterarg */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
	    1,				/* nsegments */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2439 "failed to create jumbo Rx ring DMA tag\n");
2445 * Workaround hardware hang which seems to happen when Rx buffer
2446 * is not aligned on multiple of FIFO word(8 bytes).
2448 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2449 rxalign = MSK_RX_BUF_ALIGN;
2450 /* Create tag for jumbo Rx buffers. */
2451 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2452 rxalign, 0, /* alignment, boundary */
2453 BUS_SPACE_MAXADDR, /* lowaddr */
2454 BUS_SPACE_MAXADDR, /* highaddr */
2455 NULL, NULL, /* filter, filterarg */
	    MJUM9BYTES,			/* maxsize */
	    1,				/* nsegments */
	    MJUM9BYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
2464 "failed to create jumbo Rx DMA tag\n");
2468 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2469 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2470 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2471 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2472 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2474 device_printf(sc_if->msk_if_dev,
2475 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2479 ctx.msk_busaddr = 0;
2480 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2481 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2482 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2483 msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2485 device_printf(sc_if->msk_if_dev,
2486 "failed to load DMA'able memory for jumbo Rx ring\n");
2489 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2491 /* Create DMA maps for jumbo Rx buffers. */
2492 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2493 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2494 device_printf(sc_if->msk_if_dev,
2495 "failed to create spare jumbo Rx dmamap\n");
2498 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2499 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2501 jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
2506 "failed to create jumbo Rx dmamap\n");
jumbo_fail:
	msk_rx_dma_jfree(sc_if);
	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
	    "due to resource shortage\n");
	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
	return (error);
}
2522 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2524 struct msk_txdesc *txd;
2525 struct msk_rxdesc *rxd;
2529 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2530 if (sc_if->msk_cdata.msk_tx_ring_map)
2531 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2532 sc_if->msk_cdata.msk_tx_ring_map);
2533 if (sc_if->msk_cdata.msk_tx_ring_map &&
2534 sc_if->msk_rdata.msk_tx_ring)
2535 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2536 sc_if->msk_rdata.msk_tx_ring,
2537 sc_if->msk_cdata.msk_tx_ring_map);
2538 sc_if->msk_rdata.msk_tx_ring = NULL;
2539 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2540 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2541 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2544 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2545 if (sc_if->msk_cdata.msk_rx_ring_map)
2546 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2547 sc_if->msk_cdata.msk_rx_ring_map);
2548 if (sc_if->msk_cdata.msk_rx_ring_map &&
2549 sc_if->msk_rdata.msk_rx_ring)
2550 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2551 sc_if->msk_rdata.msk_rx_ring,
2552 sc_if->msk_cdata.msk_rx_ring_map);
2553 sc_if->msk_rdata.msk_rx_ring = NULL;
2554 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2555 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2556 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2559 if (sc_if->msk_cdata.msk_tx_tag) {
2560 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2561 txd = &sc_if->msk_cdata.msk_txdesc[i];
2562 if (txd->tx_dmamap) {
2563 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2565 txd->tx_dmamap = NULL;
2568 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2569 sc_if->msk_cdata.msk_tx_tag = NULL;
2572 if (sc_if->msk_cdata.msk_rx_tag) {
2573 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2574 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2575 if (rxd->rx_dmamap) {
2576 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2578 rxd->rx_dmamap = NULL;
2581 if (sc_if->msk_cdata.msk_rx_sparemap) {
2582 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2583 sc_if->msk_cdata.msk_rx_sparemap);
2584 sc_if->msk_cdata.msk_rx_sparemap = 0;
2586 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2587 sc_if->msk_cdata.msk_rx_tag = NULL;
2589 if (sc_if->msk_cdata.msk_parent_tag) {
2590 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2591 sc_if->msk_cdata.msk_parent_tag = NULL;
2596 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2598 struct msk_rxdesc *jrxd;
2601 /* Jumbo Rx ring. */
2602 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2603 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2604 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2605 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2606 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2607 sc_if->msk_rdata.msk_jumbo_rx_ring)
2608 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2609 sc_if->msk_rdata.msk_jumbo_rx_ring,
2610 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2611 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2612 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2613 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2614 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2616 /* Jumbo Rx buffers. */
2617 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2618 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2619 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2620 if (jrxd->rx_dmamap) {
2622 sc_if->msk_cdata.msk_jumbo_rx_tag,
2624 jrxd->rx_dmamap = NULL;
2627 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2628 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2629 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2630 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2632 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2633 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2638 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2640 struct msk_txdesc *txd, *txd_last;
2641 struct msk_tx_desc *tx_le;
2644 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2645 uint32_t control, csum, prod, si;
2646 uint16_t offset, tcp_offset, tso_mtu;
2647 int error, i, nseg, tso;
2649 MSK_IF_LOCK_ASSERT(sc_if);
2651 tcp_offset = offset = 0;
2653 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2654 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2655 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2656 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers
		 * here to set up TSO and checksum offload.  I don't know
		 * why Marvell made such a decision in the chip design,
		 * because other GigE hardware normally takes care of
		 * these chores itself.  However, the TSO performance of
		 * Yukon II is good enough that it's worth implementing.
		 */
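		/*
		 * Illustrative sketch (not part of the driver): the
		 * header walk performed below, reduced to pointer
		 * arithmetic on a contiguous buffer.  "buf" is
		 * hypothetical; the real code must use m_pullup()
		 * because an mbuf chain need not be contiguous.
		 */
#if 0
		char *buf;		/* hypothetical contiguous frame */
		struct ether_header *xeh;
		struct ip *xip;
		struct tcphdr *xth;
		int xoff;

		xeh = (struct ether_header *)buf;
		xoff = sizeof(struct ether_header);
		if (xeh->ether_type == htons(ETHERTYPE_VLAN))
			xoff = sizeof(struct ether_vlan_header);
		xip = (struct ip *)(buf + xoff);
		xoff += xip->ip_hl << 2;	/* IP header, incl. options */
		xth = (struct tcphdr *)(buf + xoff);
		xoff += xth->th_off << 2;	/* TCP header, incl. options */
#endif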
2666 struct ether_header *eh;
2670 if (M_WRITABLE(m) == 0) {
2671 /* Get a writable copy. */
2672 m = m_dup(*m_head, M_NOWAIT);
2681 offset = sizeof(struct ether_header);
2682 m = m_pullup(m, offset);
2687 eh = mtod(m, struct ether_header *);
2688 /* Check if hardware VLAN insertion is off. */
2689 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2690 offset = sizeof(struct ether_vlan_header);
2691 m = m_pullup(m, offset);
2697 m = m_pullup(m, offset + sizeof(struct ip));
2702 ip = (struct ip *)(mtod(m, char *) + offset);
2703 offset += (ip->ip_hl << 2);
2704 tcp_offset = offset;
2705 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2706 m = m_pullup(m, offset + sizeof(struct tcphdr));
2711 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2712 offset += (tcp->th_off << 2);
2713 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2714 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2715 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
		/*
		 * It seems that Yukon II has a Tx checksum offload bug
		 * for small TCP packets less than 60 bytes in size
		 * (e.g. TCP window probe packets, pure ACK packets).
		 * The common workaround of padding the frame with zeros
		 * up to the minimum Ethernet frame size did not work at
		 * all.  Instead of disabling checksum offload completely
		 * we resort to a software checksum routine when we
		 * encounter short TCP frames.
		 * Short UDP packets appear to be handled correctly by
		 * Yukon II.  I also assume this bug does not happen on
		 * controllers that use the newer descriptor format or
		 * automatic Tx checksum calculation.
		 */
2731 m = m_pullup(m, offset + sizeof(struct tcphdr));
2736 *(uint16_t *)(m->m_data + offset +
2737 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2738 m->m_pkthdr.len, offset);
2739 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
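			/*
			 * Illustrative sketch (not part of the driver):
			 * the Internet checksum that in_cksum_skip()
			 * computes above, written out for a flat buffer.
			 * "buf" and "len" are hypothetical.
			 */
#if 0
			const uint8_t *buf;	/* hypothetical payload */
			int len;		/* hypothetical length */
			uint32_t sum = 0;
			uint16_t xcsum;

			while (len > 1) {
				sum += *(const uint16_t *)buf;
				buf += 2;
				len -= 2;
			}
			if (len == 1)
				sum += *buf;	/* pad odd byte with zero */
			sum = (sum >> 16) + (sum & 0xffff); /* fold carries */
			sum += (sum >> 16);
			xcsum = ~sum & 0xffff;	/* one's complement */
#endif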
2744 prod = sc_if->msk_cdata.msk_tx_prod;
2745 txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
2748 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2749 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2750 if (error == EFBIG) {
2751 m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
2758 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2759 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2765 } else if (error != 0)
2773 /* Check number of available descriptors. */
2774 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2775 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		return (ENOBUFS);
	}

	control = 0;
	tso = 0;
	tx_le = NULL;

	/* Check TSO support. */
2785 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2786 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2787 tso_mtu = m->m_pkthdr.tso_segsz;
2789 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2790 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2791 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2792 tx_le->msk_addr = htole32(tso_mtu);
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
			else
				tx_le->msk_control =
				    htole32(OP_LRGLEN | HW_OWNER);
2798 sc_if->msk_cdata.msk_tx_cnt++;
2799 MSK_INC(prod, MSK_TX_RING_CNT);
			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
		}
		tso++;
	}
2804 /* Check if we have a VLAN tag to insert. */
2805 if ((m->m_flags & M_VLANTAG) != 0) {
2806 if (tx_le == NULL) {
2807 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2808 tx_le->msk_addr = htole32(0);
2809 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2810 htons(m->m_pkthdr.ether_vtag));
2811 sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
		} else {
			tx_le->msk_control |= htole32(OP_VLAN |
			    htons(m->m_pkthdr.ether_vtag));
		}
		control |= INS_VLAN;
	}
2819 /* Check if we have to handle checksum offload. */
2820 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
			control |= CALSUM;
		else {
			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
				control |= UDPTCP;
2827 /* Checksum write position. */
2828 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
2829 /* Checksum start position. */
2830 csum |= (uint32_t)tcp_offset << 16;
2831 if (csum != sc_if->msk_cdata.msk_last_csum) {
2832 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2833 tx_le->msk_addr = htole32(csum);
2834 tx_le->msk_control = htole32(1 << 16 |
2835 (OP_TCPLISW | HW_OWNER));
2836 sc_if->msk_cdata.msk_tx_cnt++;
2837 MSK_INC(prod, MSK_TX_RING_CNT);
				sc_if->msk_cdata.msk_last_csum = csum;
			}
		}
	}
2843 #ifdef MSK_64BIT_DMA
2844 if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
2845 sc_if->msk_cdata.msk_tx_high_addr) {
2846 sc_if->msk_cdata.msk_tx_high_addr =
2847 MSK_ADDR_HI(txsegs[0].ds_addr);
2848 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2849 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
2850 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2851 sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
#endif
	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2857 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	if (tso == 0)
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_PACKET);
	else
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_LARGESEND);
2864 sc_if->msk_cdata.msk_tx_cnt++;
2865 MSK_INC(prod, MSK_TX_RING_CNT);
2867 for (i = 1; i < nseg; i++) {
2868 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2869 #ifdef MSK_64BIT_DMA
2870 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
2871 sc_if->msk_cdata.msk_tx_high_addr) {
2872 sc_if->msk_cdata.msk_tx_high_addr =
2873 MSK_ADDR_HI(txsegs[i].ds_addr);
2874 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr =
			    htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
2877 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2878 sc_if->msk_cdata.msk_tx_cnt++;
2879 MSK_INC(prod, MSK_TX_RING_CNT);
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		}
#endif
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2884 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2885 OP_BUFFER | HW_OWNER);
2886 sc_if->msk_cdata.msk_tx_cnt++;
2887 MSK_INC(prod, MSK_TX_RING_CNT);
2889 /* Update producer index. */
2890 sc_if->msk_cdata.msk_tx_prod = prod;
2892 /* Set EOP on the last descriptor. */
2893 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2894 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2895 tx_le->msk_control |= htole32(EOP);
2897 /* Turn the first descriptor ownership to hardware. */
2898 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2899 tx_le->msk_control |= htole32(HW_OWNER);
2901 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2902 map = txd_last->tx_dmamap;
2903 txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;
2907 /* Sync descriptors. */
2908 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2909 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2910 sc_if->msk_cdata.msk_tx_ring_map,
2911 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
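	/*
	 * Illustrative sketch (not part of the driver): the ring index
	 * arithmetic used throughout msk_encap().  MSK_INC() advances an
	 * index modulo the ring size; stepping back to the previous
	 * descriptor adds MSK_TX_RING_CNT - 1 before the modulus, as
	 * done for the EOP descriptor above.
	 */
#if 0
	uint32_t cur = 0, prev;

	MSK_INC(cur, MSK_TX_RING_CNT);	/* cur == 1 */
	prev = (cur + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; /* prev == 0 */
#endif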
2917 msk_start(struct ifnet *ifp)
2919 struct msk_if_softc *sc_if;
2921 sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	msk_start_locked(ifp);
2924 MSK_IF_UNLOCK(sc_if);
2928 msk_start_locked(struct ifnet *ifp)
2930 struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;
2934 sc_if = ifp->if_softc;
2935 MSK_IF_LOCK_ASSERT(sc_if);
2937 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		return;
2941 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2942 sc_if->msk_cdata.msk_tx_cnt <
2943 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
2948 * Pack the data into the transmit ring. If we
2949 * don't have room, set the OACTIVE flag and wait
2950 * for the NIC to drain the ring.
		if (msk_encap(sc_if, &m_head) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
2970 CSR_WRITE_2(sc_if->msk_softc,
2971 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2972 sc_if->msk_cdata.msk_tx_prod);
2974 /* Set a timeout in case the chip goes out to lunch. */
2975 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2980 msk_watchdog(struct msk_if_softc *sc_if)
2984 MSK_IF_LOCK_ASSERT(sc_if);
	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
		return;
2988 ifp = sc_if->msk_ifp;
2989 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
		if_printf(sc_if->msk_ifp, "watchdog timeout "
		    "(missed link)\n");
2994 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2995 msk_init_locked(sc_if);
2999 if_printf(ifp, "watchdog timeout\n");
3001 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3002 msk_init_locked(sc_if);
3003 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3004 msk_start_locked(ifp);
3008 mskc_shutdown(device_t dev)
3010 struct msk_softc *sc;
3013 sc = device_get_softc(dev);
3015 for (i = 0; i < sc->msk_num_port; i++) {
3016 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3017 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3018 IFF_DRV_RUNNING) != 0))
3019 msk_stop(sc->msk_if[i]);
	/* Put the hardware into reset. */
3024 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3029 mskc_suspend(device_t dev)
3031 struct msk_softc *sc;
3034 sc = device_get_softc(dev);
3038 for (i = 0; i < sc->msk_num_port; i++) {
3039 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3040 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3041 IFF_DRV_RUNNING) != 0))
3042 msk_stop(sc->msk_if[i]);
3045 /* Disable all interrupts. */
3046 CSR_WRITE_4(sc, B0_IMSK, 0);
3047 CSR_READ_4(sc, B0_IMSK);
3048 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
3049 CSR_READ_4(sc, B0_HWE_IMSK);
3051 msk_phy_power(sc, MSK_PHY_POWERDOWN);
	/* Put the hardware into reset. */
3054 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3055 sc->msk_pflags |= MSK_FLAG_SUSPEND;
3063 mskc_resume(device_t dev)
3065 struct msk_softc *sc;
3068 sc = device_get_softc(dev);
3072 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
3074 for (i = 0; i < sc->msk_num_port; i++) {
3075 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3076 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
3077 sc->msk_if[i]->msk_ifp->if_drv_flags &=
3079 msk_init_locked(sc->msk_if[i]);
3082 sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
3089 #ifndef __NO_STRICT_ALIGNMENT
3090 static __inline void
3091 msk_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;
	src = mtod(m, uint16_t *);
	dst = src - 3;	/* (MSK_RX_BUF_ALIGN - ETHER_ALIGN) / 2 words back */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
}
3106 static __inline void
3107 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;
3115 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
3116 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3117 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3118 if ((control & CSS_IPV4_CSUM_OK) != 0)
3119 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3120 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3121 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3122 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3124 m->m_pkthdr.csum_data = 0xffff;
	/*
	 * Marvell Yukon controllers that support OP_RXCHKS are known
	 * to have various Rx checksum offloading bugs.  These
	 * controllers can be configured to compute a simple checksum
	 * at two different positions, so we could compute the IP and
	 * TCP/UDP checksums at the same time.  Instead, we intentionally
	 * have the controller compute the TCP/UDP checksum twice, by
	 * specifying the same checksum start position, and compare the
	 * results.  If the values differ, the hardware checksum logic
	 * is broken.
	 */
3139 if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
		if (bootverbose)
			device_printf(sc_if->msk_if_dev,
			    "Rx checksum value mismatch!\n");
		return;
	}
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
3181 csum = bswap16(sc_if->msk_csum & 0xFFFF);
3182 /* Checksum fixup for IP options. */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
3192 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
3193 m->m_pkthdr.csum_data = csum;
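	/*
	 * Worked example (illustrative, not part of the driver): the
	 * one's-complement fold used by the IP-option fixup loop above.
	 * Subtracting a 16-bit word may borrow into the high half;
	 * folding it back keeps the value a valid one's-complement sum.
	 */
#if 0
	/* e.g. csum = 0x0005, option word = 0x000a:           */
	/*   t = 0x0005 - 0x000a     = 0xfffffffb (borrow)     */
	/*   (t >> 16) + (t & 65535) = -1 + 0xfffb = 0xfffa    */
	int32_t t = 0x0005 - 0x000a;

	t = (t >> 16) + (t & 65535);
	t &= 65535;			/* t == 0xfffa */
#endif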
3197 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3202 struct msk_rxdesc *rxd;
3205 ifp = sc_if->msk_ifp;
3207 MSK_IF_LOCK_ASSERT(sc_if);
3209 cons = sc_if->msk_cdata.msk_rx_cons;
3211 rxlen = status >> 16;
3212 if ((status & GMR_FS_VLAN) != 0 &&
3213 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3214 rxlen -= ETHER_VLAN_ENCAP_LEN;
3215 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
		/*
		 * For controllers that return a bogus status code, just
		 * do a minimal check and let the upper stack handle the
		 * frame.
		 */
3221 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
3223 msk_discard_rxbuf(sc_if, cons);
3226 } else if (len > sc_if->msk_framesize ||
3227 ((status & GMR_FS_ANY_ERR) != 0) ||
3228 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
		/* Don't count flow-control packets as errors. */
3230 if ((status & GMR_FS_GOOD_FC) == 0)
3232 msk_discard_rxbuf(sc_if, cons);
3235 #ifdef MSK_64BIT_DMA
3236 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
3239 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3242 if (msk_newbuf(sc_if, cons) != 0) {
3244 /* Reuse old buffer. */
3245 msk_discard_rxbuf(sc_if, cons);
3248 m->m_pkthdr.rcvif = ifp;
3249 m->m_pkthdr.len = m->m_len = len;
3250 #ifndef __NO_STRICT_ALIGNMENT
3251 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3255 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3256 msk_rxcsum(sc_if, control, m);
3257 /* Check for VLAN tagged packets. */
3258 if ((status & GMR_FS_VLAN) != 0 &&
3259 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3260 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3261 m->m_flags |= M_VLANTAG;
3263 MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
3268 MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3269 MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3273 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3278 struct msk_rxdesc *jrxd;
3281 ifp = sc_if->msk_ifp;
3283 MSK_IF_LOCK_ASSERT(sc_if);
3285 cons = sc_if->msk_cdata.msk_rx_cons;
3287 rxlen = status >> 16;
3288 if ((status & GMR_FS_VLAN) != 0 &&
3289 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3290 rxlen -= ETHER_VLAN_ENCAP_LEN;
3291 if (len > sc_if->msk_framesize ||
3292 ((status & GMR_FS_ANY_ERR) != 0) ||
3293 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
		/* Don't count flow-control packets as errors. */
3295 if ((status & GMR_FS_GOOD_FC) == 0)
3297 msk_discard_jumbo_rxbuf(sc_if, cons);
3300 #ifdef MSK_64BIT_DMA
3301 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
3302 MSK_JUMBO_RX_RING_CNT];
3304 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3307 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3309 /* Reuse old buffer. */
3310 msk_discard_jumbo_rxbuf(sc_if, cons);
3313 m->m_pkthdr.rcvif = ifp;
3314 m->m_pkthdr.len = m->m_len = len;
3315 #ifndef __NO_STRICT_ALIGNMENT
3316 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3320 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3321 msk_rxcsum(sc_if, control, m);
3322 /* Check for VLAN tagged packets. */
3323 if ((status & GMR_FS_VLAN) != 0 &&
3324 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3325 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3326 m->m_flags |= M_VLANTAG;
3328 MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
3333 MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3334 MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3338 msk_txeof(struct msk_if_softc *sc_if, int idx)
3340 struct msk_txdesc *txd;
3341 struct msk_tx_desc *cur_tx;
3346 MSK_IF_LOCK_ASSERT(sc_if);
3348 ifp = sc_if->msk_ifp;
3350 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3351 sc_if->msk_cdata.msk_tx_ring_map,
3352 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
3357 cons = sc_if->msk_cdata.msk_tx_cons;
3359 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3360 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3363 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3364 control = le32toh(cur_tx->msk_control);
3365 sc_if->msk_cdata.msk_tx_cnt--;
3366 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3367 if ((control & EOP) == 0)
3369 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3370 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3371 BUS_DMASYNC_POSTWRITE);
3372 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3375 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3382 sc_if->msk_cdata.msk_tx_cons = cons;
3383 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3384 sc_if->msk_watchdog_timer = 0;
3385 /* No need to sync LEs as we didn't update LEs. */
3390 msk_tick(void *xsc_if)
3392 struct msk_if_softc *sc_if;
3393 struct mii_data *mii;
3397 MSK_IF_LOCK_ASSERT(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);
3402 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
3403 msk_miibus_statchg(sc_if->msk_if_dev);
3404 msk_handle_events(sc_if->msk_softc);
3405 msk_watchdog(sc_if);
3406 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3410 msk_intr_phy(struct msk_if_softc *sc_if)
3414 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3415 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3416 /* Handle FIFO Underrun/Overflow? */
3417 if ((status & PHY_M_IS_FIFO_ERROR))
3418 device_printf(sc_if->msk_if_dev,
3419 "PHY FIFO underrun/overflow.\n");
3423 msk_intr_gmac(struct msk_if_softc *sc_if)
3425 struct msk_softc *sc;
3428 sc = sc_if->msk_softc;
3429 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3431 /* GMAC Rx FIFO overrun. */
3432 if ((status & GM_IS_RX_FF_OR) != 0)
3433 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3435 /* GMAC Tx FIFO underrun. */
3436 if ((status & GM_IS_TX_FF_UR) != 0) {
3437 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3439 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * In case of Tx underrun, we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with the status LEs.  Reinitializing the status LEs
		 * would affect the other port in a dual-MAC configuration,
		 * so it should be avoided as much as possible.  Due to the
		 * lack of documentation this is all vague guesswork and
		 * needs more investigation.
		 */
3454 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3456 struct msk_softc *sc;
3458 sc = sc_if->msk_softc;
3459 if ((status & Y2_IS_PAR_RD1) != 0) {
3460 device_printf(sc_if->msk_if_dev,
3461 "RAM buffer read parity error\n");
3463 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3466 if ((status & Y2_IS_PAR_WR1) != 0) {
3467 device_printf(sc_if->msk_if_dev,
3468 "RAM buffer write parity error\n");
3470 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3473 if ((status & Y2_IS_PAR_MAC1) != 0) {
3474 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3476 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3479 if ((status & Y2_IS_PAR_RX1) != 0) {
3480 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3482 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3484 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3485 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3487 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3492 msk_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];
3497 status = CSR_READ_4(sc, B0_HWE_ISRC);
3498 /* Time Stamp timer overflow. */
3499 if ((status & Y2_IS_TIST_OV) != 0)
3500 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3501 if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped to either the Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
		 * and can only be cleared there.
		 */
3509 device_printf(sc->msk_dev,
3510 "PCI Express protocol violation error\n");
	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
3522 /* Reset all bits in the PCI status register. */
3523 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3524 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3525 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3526 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3527 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
3528 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3531 /* Check for PCI Express Uncorrectable Error. */
3532 if ((status & Y2_IS_PCI_EXP) != 0) {
		/*
		 * On a PCI Express bus, bridges are called root complexes
		 * (RC).  PCI Express errors are recognized by the root
		 * complex as well, which requests the system to handle
		 * the problem.  After the error occurs it may be that no
		 * access to the adapter can be performed any longer.
		 */
3543 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3544 if ((v32 & PEX_UNSUP_REQ) != 0) {
3545 /* Ignore unsupported request error. */
3546 device_printf(sc->msk_dev,
3547 "Uncorrectable PCI Express error\n");
3549 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			/* Get TLP header from Log Registers. */
3553 for (i = 0; i < 4; i++)
3554 tlphead[i] = CSR_PCI_READ_4(sc,
3555 PEX_HEADER_LOG + i * 4);
3556 /* Check for vendor defined broadcast message. */
3557 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3558 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3559 CSR_WRITE_4(sc, B0_HWE_IMSK,
3560 sc->msk_intrhwemask);
3561 CSR_READ_4(sc, B0_HWE_IMSK);
3564 /* Clear the interrupt. */
3565 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3566 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3567 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3570 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3571 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3572 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3573 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3576 static __inline void
3577 msk_rxput(struct msk_if_softc *sc_if)
3579 struct msk_softc *sc;
3581 sc = sc_if->msk_softc;
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_rx_ring_tag,
		    sc_if->msk_cdata.msk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3592 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3593 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3597 msk_handle_events(struct msk_softc *sc)
3599 struct msk_if_softc *sc_if;
3601 struct msk_stat_desc *sd;
3602 uint32_t control, status;
3603 int cons, len, port, rxprog;
	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
		return (0);
3608 /* Sync status LEs. */
3609 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3610 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3612 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
	rxprog = 0;
	cons = sc->msk_stat_cons;
	for (;;) {
3616 sd = &sc->msk_stat_ring[cons];
3617 control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
3620 control &= ~HW_OWNER;
3621 sd->msk_control = htole32(control);
3622 status = le32toh(sd->msk_status);
3623 len = control & STLE_LEN_MASK;
3624 port = (control >> 16) & 0x01;
3625 sc_if = sc->msk_if[port];
3626 if (sc_if == NULL) {
3627 device_printf(sc->msk_dev, "invalid port opcode "
3628 "0x%08x\n", control & STLE_OP_MASK);
3632 switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKS:
			sc_if->msk_csum = status;
			break;
		case OP_RXSTAT:
			if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
				break;
			if (sc_if->msk_framesize >
			    (MCLBYTES - MSK_RX_BUF_ALIGN))
				msk_jumbo_rxeof(sc_if, status, control, len);
			else
				msk_rxeof(sc_if, status, control, len);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * put the DMA sync operation off until the end of
			 * event processing.
			 */
			rxput[port]++;
3657 /* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL)
3665 msk_txeof(sc->msk_if[MSK_PORT_A],
3666 status & STLE_TXA1_MSKL);
3667 if (sc->msk_if[MSK_PORT_B] != NULL)
3668 msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				    STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				    STLE_TXA2_SHIFTH));
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3676 control & STLE_OP_MASK);
3679 MSK_INC(cons, sc->msk_stat_count);
		if (rxprog > sc->msk_process_limit)
			break;
	}

	sc->msk_stat_cons = cons;
3685 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3688 if (rxput[MSK_PORT_A] > 0)
3689 msk_rxput(sc->msk_if[MSK_PORT_A]);
3690 if (rxput[MSK_PORT_B] > 0)
3691 msk_rxput(sc->msk_if[MSK_PORT_B]);
3693 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
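/*
 * Illustrative sketch (not part of the driver): how the fields used by
 * msk_handle_events() unpack from a status LE control word.  The field
 * positions are taken from the usage above (length in the low bits
 * under STLE_LEN_MASK, port in bit 16, opcode under STLE_OP_MASK,
 * ownership in HW_OWNER); "ctl" is hypothetical.
 */
#if 0
	uint32_t ctl;

	int owned = (ctl & HW_OWNER) != 0;	/* chip finished this LE? */
	int le_len = ctl & STLE_LEN_MASK;	/* frame length / Tx index */
	int le_port = (ctl >> 16) & 0x01;	/* MSK_PORT_A or MSK_PORT_B */
	uint32_t op = ctl & STLE_OP_MASK;	/* OP_RXSTAT, OP_TXINDEXLE... */
#endif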
3699 struct msk_softc *sc;
3700 struct msk_if_softc *sc_if0, *sc_if1;
3701 struct ifnet *ifp0, *ifp1;
3708 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3709 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3710 if (status == 0 || status == 0xffffffff ||
3711 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3712 (status & sc->msk_intrmask) == 0) {
3713 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3718 sc_if0 = sc->msk_if[MSK_PORT_A];
3719 sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;
3726 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3727 msk_intr_phy(sc_if0);
3728 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3729 msk_intr_phy(sc_if1);
3730 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3731 msk_intr_gmac(sc_if0);
3732 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3733 msk_intr_gmac(sc_if1);
3734 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3735 device_printf(sc->msk_dev, "Rx descriptor error\n");
3736 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3737 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3738 CSR_READ_4(sc, B0_IMSK);
3740 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3741 device_printf(sc->msk_dev, "Tx descriptor error\n");
3742 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3743 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3744 CSR_READ_4(sc, B0_IMSK);
	if ((status & Y2_IS_HW_ERR) != 0)
		msk_intr_hwerr(sc);
3749 domore = msk_handle_events(sc);
3750 if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
3751 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3753 /* Clear TWSI IRQ. */
3754 if ((status & Y2_IS_TWSI_RDY) != 0)
3755 CSR_WRITE_4(sc, B2_I2C_IRQ, 1);
3756 /* Reenable interrupts. */
3757 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3759 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3760 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3761 msk_start_locked(ifp0);
3762 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3763 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3764 msk_start_locked(ifp1);
3770 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3772 struct msk_softc *sc;
3775 ifp = sc_if->msk_ifp;
3776 sc = sc_if->msk_softc;
3777 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
3778 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
3779 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_ENA);
	} else {
		if (ifp->if_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(sc,
			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port,
			    TX_GMF_CTRL_T), TX_STFW_DIS);
		} else
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port,
			    TX_GMF_CTRL_T), TX_STFW_ENA);
	}
}
3801 struct msk_if_softc *sc_if = xsc;
	MSK_IF_LOCK(sc_if);
	msk_init_locked(sc_if);
3805 MSK_IF_UNLOCK(sc_if);
3809 msk_init_locked(struct msk_if_softc *sc_if)
3811 struct msk_softc *sc;
3813 struct mii_data *mii;
3819 MSK_IF_LOCK_ASSERT(sc_if);
3821 ifp = sc_if->msk_ifp;
3822 sc = sc_if->msk_softc;
3823 mii = device_get_softc(sc_if->msk_miibus);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
3829 /* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	if (ifp->if_mtu < ETHERMTU)
		sc_if->msk_framesize = ETHERMTU;
	else
		sc_if->msk_framesize = ifp->if_mtu;
3836 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3837 if (ifp->if_mtu > ETHERMTU &&
3838 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3839 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3840 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3843 /* GMAC Control reset. */
3844 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3845 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3846 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3847 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3848 sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
3849 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3850 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated when the interface is brought up.
	 */
3857 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3859 /* Dummy read the Interrupt Source Register. */
3860 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3862 /* Clear MIB stats. */
3863 msk_stats_clear(sc_if);
3866 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3868 /* Setup Transmit Control Register. */
3869 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3871 /* Setup Transmit Flow Control Register. */
3872 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3874 /* Setup Transmit Parameter Register. */
3875 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3876 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3877 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3879 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3880 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3882 if (ifp->if_mtu > ETHERMTU)
3883 gmac |= GM_SMOD_JUMBO_ENA;
3884 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3886 /* Set station address. */
3887 eaddr = IF_LLADDR(ifp);
3888 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3889 eaddr[0] | (eaddr[1] << 8));
3890 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3891 eaddr[2] | (eaddr[3] << 8));
3892 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3893 eaddr[4] | (eaddr[5] << 8));
3894 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3895 eaddr[0] | (eaddr[1] << 8));
3896 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3897 eaddr[2] | (eaddr[3] << 8));
3898 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3899 eaddr[4] | (eaddr[5] << 8));
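	/*
	 * Illustrative sketch (not part of the driver): packing a 6-byte
	 * Ethernet address into the three little-endian 16-bit GMAC
	 * source-address registers, as done above.  "ea" is hypothetical.
	 */
#if 0
	uint8_t ea[6];

	uint16_t lo  = ea[0] | (ea[1] << 8);	/* GM_SRC_ADDR_xL */
	uint16_t mid = ea[2] | (ea[3] << 8);	/* GM_SRC_ADDR_xM */
	uint16_t hi  = ea[4] | (ea[5] << 8);	/* GM_SRC_ADDR_xH */
#endif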
3901 /* Disable interrupts for counter overflows. */
3902 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3903 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3904 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3906 /* Configure Rx MAC FIFO. */
3907 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3908 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3909 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3910 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3911 sc->msk_hw_id == CHIP_ID_YUKON_EX)
3912 reg |= GMF_RX_OVER_ON;
3913 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3915 /* Set receive filter. */
3916 msk_rxfilter(sc_if);
3918 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3919 /* Clear flush mask - HW bug. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}
	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to a hardware hang on receipt of pause frames.
	 */
3931 reg = RX_GMF_FL_THR_DEF + 1;
	/* Another bit of magic for Yukon FE+, from Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3938 /* Configure Tx MAC FIFO. */
3939 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3940 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3941 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3943 /* Configure hardware VLAN tag insertion/stripping. */
3944 msk_setvlan(sc_if, ifp);
3946 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3947 /* Set Rx Pause threshold. */
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
3952 /* Configure store-and-forward for Tx. */
3953 msk_set_tx_stfwd(sc_if);
3956 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3957 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3958 /* Disable dynamic watermark - from Linux. */
		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
		reg &= ~0x03;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
	}
	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use the Sync Tx queue.
	 */
3968 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3969 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3970 /* Enable the RAM Interface Arbiter. */
3971 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3973 /* Setup RAM buffer. */
3974 msk_set_rambuffer(sc_if);
3976 /* Disable Tx sync Queue. */
3977 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3979 /* Setup Tx Queue Bus Memory Interface. */
3980 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3981 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3982 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3983 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3984 switch (sc->msk_hw_id) {
3985 case CHIP_ID_YUKON_EC_U:
3986 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3987 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3988 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3992 case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
3997 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3998 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
4003 /* Setup Rx Queue Bus Memory Interface. */
4004 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
4005 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
4006 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
4007 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
4008 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
4009 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
4010 /* MAC Rx RAM Read is controlled by hardware. */
4011 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
4014 msk_set_prefetch(sc, sc_if->msk_txq,
4015 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
4016 msk_init_tx_ring(sc_if);
4018 /* Disable Rx checksum offload and RSS hash. */
4019 reg = BMU_DIS_RX_RSS_HASH;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
		reg |= BMU_ENA_RX_CHKSUM;
	else
		reg |= BMU_DIS_RX_CHKSUM;
4025 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
4026 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
4027 msk_set_prefetch(sc, sc_if->msk_rxq,
4028 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
4029 MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
4033 sc_if->msk_rdata.msk_rx_ring_paddr,
4034 MSK_RX_RING_CNT - 1);
4035 error = msk_init_rx_ring(sc_if);
4038 device_printf(sc_if->msk_if_dev,
4039 "initialization failed: no memory for Rx buffers\n");
4043 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
4044 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
4045 /* Disable flushing of non-ASF packets. */
4046 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
4047 GMF_RX_MACSEC_FLUSH_OFF);
4050 /* Configure interrupt handling. */
4051 if (sc_if->msk_port == MSK_PORT_A) {
4052 sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
4056 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
4058 /* Configure IRQ moderation mask. */
4059 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
4060 if (sc->msk_int_holdoff > 0) {
4061 /* Configure initial IRQ moderation timer value. */
4062 CSR_WRITE_4(sc, B2_IRQM_INI,
4063 MSK_USECS(sc, sc->msk_int_holdoff));
4064 CSR_WRITE_4(sc, B2_IRQM_VAL,
4065 MSK_USECS(sc, sc->msk_int_holdoff));
4066 /* Start IRQ moderation. */
4067 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
4069 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4070 CSR_READ_4(sc, B0_HWE_IMSK);
4071 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4072 CSR_READ_4(sc, B0_IMSK);
4074 sc_if->msk_flags &= ~MSK_FLAG_LINK;
4077 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4078 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4080 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
4084 msk_set_rambuffer(struct msk_if_softc *sc_if)
4086 struct msk_softc *sc;
4089 sc = sc_if->msk_softc;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;
	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
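
	/*
	 * Program the upper/lower pause thresholds for the Rx RAM
	 * buffer.  When the buffer fill level rises above the upper
	 * threshold the MAC asserts flow control (sends pause frames)
	 * and releases it again once the level drops below the lower
	 * threshold; small RAM buffers get a more conservative lower
	 * threshold.
	 */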
	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
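
/*
 * Program a queue's list-element prefetch unit: point it at the DMA
 * descriptor ring in host memory, tell it the index of the last
 * descriptor, and turn it on.  The trailing read flushes the posted
 * write that enables the unit.
 */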
static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
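
/*
 * Stop the interface: quiesce the MAC, BMUs and RAM buffers in the
 * order the hardware requires, then reclaim any mbufs still owned by
 * the Rx, jumbo Rx and Tx rings so they are not leaked.
 */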
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct ifnet *ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	/* Update stats and clear counters. */
	msk_stats_update(sc_if);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet, and since we can't make sure that we have
	 * incoming data, we must reset the BMU while no DMA transfer is in
	 * progress.  Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer is stopped first, so any possible incoming
	 * data will not trigger a DMA.  After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress has ended, and only then
	 * is it reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}

/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
 * 16 bits of a MIB counter clears its high 16 bits, so the lower half
 * must be read last.
 */
#define	MSK_READ_MIB32(x, y)					\
	((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y))
#define	MSK_READ_MIB64(x, y)					\
	((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y))
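
/*
 * Note that both macros expand to references to a local `sc', so they
 * may only be used where a `struct msk_softc *sc' is in scope.  Note
 * also that C does not guarantee the left operand of `+' is evaluated
 * first, so the required high-word-before-low-word read order
 * ultimately relies on the compiler.
 */

/*
 * Zero the hardware MIB counters: with clear-on-read mode enabled,
 * one pass over the counter range resets every counter.
 */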
static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
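
/*
 * Fold the hardware MIB counters into the software copies held in
 * sc_if->msk_stats.  Because the counters are read in clear-on-read
 * mode, each value read is a delta since the previous update and can
 * simply be added to the running totals.
 */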
static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_pkts_too_short +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
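
/*
 * Sysctl handlers for the per-counter statistics nodes.  Each handler
 * reads the live hardware counter under the interface lock, adds the
 * total accumulated so far by msk_stats_update(), and reports the sum.
 */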
static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}
static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_64(oidp, &result, 0, req));
}
#undef MSK_READ_MIB32
#undef MSK_READ_MIB64

#define	MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define	MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "QU", d)
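
/*
 * Create the per-port "stats" sysctl tree with its "rx" and "tx"
 * subtrees.  The `n' argument of the macros above must name a field
 * of struct msk_hw_stats, since its offset is what the handlers use
 * to locate the accumulated counter.
 */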
static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "Frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}

#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
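
/*
 * Generic helper for bounded integer tunables: accept a new value
 * only if it lies within [low, high].
 */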
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
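
/*
 * Validate the interrupt-processing limit: values outside
 * [MSK_PROC_MIN, MSK_PROC_MAX] are rejected.
 */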
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}