/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 *	Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 *	The computer program files contained in this folder ("Files")
 *	are provided to you under the BSD-type license terms provided
 *	below, and any use of such Files and any derivative works
 *	thereof created by you shall be governed by the following terms
 *	and conditions:
 *
 *	- Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials provided
 *	  with the distribution.
 *	- Neither the name of Marvell nor the names of its contributors
 *	  may be used to endorse or promote products derived from this
 *	  software without specific prior written permission.
 *
 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *	BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 *	OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
/*-
 * SPDX-License-Identifier: BSD-4-Clause AND BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
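
/*
 * Loader tunables. These are read once at boot and may be set from
 * loader.conf(5), e.g. hw.msk.msi_disable="1" to force INTx
 * interrupts instead of MSI.
 */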
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
	    "D-Link 560SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};
static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(struct ifnet *);
static void msk_start_locked(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	DEVMETHOD(bus_get_dma_tag,	mskc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;
static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
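
/*
 * MII interface. PHY registers are reached through the GMAC's SMI
 * (Serial Management Interface) control/data registers; the helpers
 * below busy-wait, bounded by MSK_TIMEOUT, for each operation to
 * complete.
 */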
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}
static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		}
	}

	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac = GMC_PAUSE_OFF;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				gmac = GMC_PAUSE_ON;
		}
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
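
/*
 * Program the GMAC Rx filter. Multicast addresses are folded into a
 * 64-bit hash: the low 6 bits of the big-endian CRC32 of each address
 * select one bit in mchash[], which is then loaded into the four
 * 16-bit multicast hash registers.
 */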
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
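
/*
 * Enable or disable hardware VLAN tag stripping/insertion in the
 * Rx/Tx GMAC FIFOs to match the interface's IFCAP_VLAN_HWTAGGING
 * setting.
 */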
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}
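
/*
 * After the ring setup posts an OP_TCPSTART list element, wait for
 * the prefetch unit to consume it, then hand the consumed slot back
 * to the hardware with a fresh buffer and update the put index.
 */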
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
	uint16_t idx;
	int i;

	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		/* Wait until controller executes OP_TCPSTART command. */
		for (i = 100; i > 0; i--) {
			DELAY(100);
			idx = CSR_READ_2(sc_if->msk_softc,
			    Y2_PREF_Q_ADDR(sc_if->msk_rxq,
			    PREF_UNIT_GET_IDX_REG));
			if (idx != 0)
				break;
		}
		if (i == 0) {
			device_printf(sc_if->msk_if_dev,
			    "prefetch unit stuck?\n");
			return (ETIMEDOUT);
		}
		/*
		 * Fill consumed LE with free buffer. This can be done
		 * in Rx handler but we don't want to add special code
		 * in Rx handler.
		 */
		if (jumbo > 0) {
			if (msk_jumbo_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			if (msk_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
			    sc_if->msk_cdata.msk_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc_if->msk_cdata.msk_rx_prod = 0;
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_rx_prod);
	}
	return (0);
}
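
/*
 * The two Rx ring setup routines below reset the ring state, post an
 * OP_TCPSTART list element (telling older controllers where the TCP
 * payload starts for Rx checksumming) when needed, populate the ring
 * with buffers and finally kick the prefetch unit.
 */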
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, nbuf, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		MSK_INC(prod, MSK_RX_RING_CNT);
	}
	nbuf = MSK_RX_BUF_CNT;
	prod = 0;
	/* Have controller know how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
#endif
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
		nbuf--;
	}
	for (i = 0; i < nbuf; i++) {
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_RX_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = prod;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
	    MSK_RX_RING_CNT);
	if (msk_rx_fill(sc_if, 0) != 0)
		return (ENOBUFS);
	return (0);
}
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, nbuf, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}
	nbuf = MSK_RX_BUF_CNT;
	prod = 0;
	/* Have controller know how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
#endif
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
		nbuf--;
	}
	for (i = 0; i < nbuf; i++) {
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = prod;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
	    MSK_JUMBO_RX_RING_CNT);
	if (msk_rx_fill(sc_if, 1) != 0)
		return (ENOBUFS);
	return (0);
}
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_last_csum = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;
	sc_if->msk_cdata.msk_tx_high_addr = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
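
/*
 * The discard routines below reuse the mbuf already attached to a
 * ring slot when no replacement buffer can be allocated: the
 * descriptor is simply handed back to the hardware with its old
 * buffer.
 */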
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

#ifdef MSK_64BIT_DMA
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_RX_RING_CNT);
#endif
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

#ifdef MSK_64BIT_DMA
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
#endif
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
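
/*
 * Allocate and DMA-load a fresh Rx mbuf for the slot at 'idx'. The
 * mbuf is loaded into the spare DMA map first; only on success are
 * the slot's map and the spare swapped, so the ring keeps its old
 * buffer intact if allocation or mapping fails.
 */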
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#ifdef MSK_64BIT_DMA
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_RX_RING_CNT);
	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
		rxd->rx_m = NULL;
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#ifdef MSK_64BIT_DMA
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#endif
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
		rxd->rx_m = NULL;
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int error;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (error);
}
/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	if ((ifp->if_flags & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}
	mii = device_get_softc(sc_if->msk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	MSK_IF_UNLOCK(sc_if);
}
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask, reinit;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU) {
				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
					error = EINVAL;
					MSK_IF_UNLOCK(sc_if);
					break;
				}
				if ((sc_if->msk_flags &
				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
					ifp->if_hwassist &=
					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
					ifp->if_capenable &=
					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
					VLAN_CAPABILITIES(ifp);
				}
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				msk_init_locked(sc_if);
			}
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				msk_rxfilter(sc_if);
			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
				msk_init_locked(sc_if);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_rxfilter(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		reinit = 0;
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
				reinit++;
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			msk_setvlan(sc_if, ifp);
		}
		if (ifp->if_mtu > ETHERMTU &&
		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}
		VLAN_CAPABILITIES(ifp);
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			msk_init_locked(sc_if);
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
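
/*
 * Probe for a Marvell Yukon II controller: match the PCI vendor and
 * device IDs against the msk_products table.
 */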
static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < nitems(msk_products); i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
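
/*
 * Partition the controller's internal SRAM between the Rx and Tx
 * queues of each port. Rx gets roughly two thirds; both sizes are
 * kept at multiples of 1KB as the hardware requires.
 */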
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
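
/*
 * Bring the controller into a known state: disable ASF, clear PCI or
 * PCI Express error state, power up the PHYs, reset both GMACs and
 * initialize the status list unit.
 */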
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i, initram;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	initram = 0;
	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE)
		initram++;
	/* Configure timeout values. */
	for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_expcap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pci_get_max_read_req(sc->msk_dev) == 512)
			pci_set_max_read_req(sc->msk_dev, 2048);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * sc->msk_stat_count);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}
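
/*
 * Attach one network interface (one GMAC port): allocate the DMA
 * rings, create and configure the ifnet, read the station address
 * from the chip and hook up the PHY through miibus.
 */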
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	msk_sysctl_node(sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if controller supports
	 * new descriptor format and controller is not Yukon XL.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    sc->msk_hw_id != CHIP_ID_YUKON_XL)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes checksum for short frames. For VLAN tagged frames
		 * this workaround does not work so disable checksum offload
		 * for VLAN interface.
		 */
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if controller supports new descriptor format.
		 */
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Disable RX checksum offloading on controllers that don't use
	 * new descriptor format but give chance to enable it.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
		ifp->if_capenable &= ~IFCAP_RXCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
	    mmd->mii_flags);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}
1738 * Attach the interface. Allocate softc structures, do ifmedia
1739 * setup and ethernet/BPF attach.
1742 mskc_attach(device_t dev)
1744 struct msk_softc *sc;
1745 struct msk_mii_data *mmd;
1746 int error, msic, msir, reg;
1748 sc = device_get_softc(dev);
1750 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1754 * Map control/status registers.
1756 pci_enable_busmaster(dev);
1758 /* Allocate I/O resource */
1759 #ifdef MSK_USEIOSPACE
1760 sc->msk_res_spec = msk_res_spec_io;
1762 sc->msk_res_spec = msk_res_spec_mem;
1764 sc->msk_irq_spec = msk_irq_spec_legacy;
1765 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1767 if (sc->msk_res_spec == msk_res_spec_mem)
1768 sc->msk_res_spec = msk_res_spec_io;
1770 sc->msk_res_spec = msk_res_spec_mem;
1771 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1773 device_printf(dev, "couldn't allocate %s resources\n",
1774 sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1776 mtx_destroy(&sc->msk_mtx);
1781 /* Enable all clocks before accessing any registers. */
1782 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1784 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1785 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1786 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1787 /* Bail out if chip is not recognized. */
1788 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1789 sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1790 sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1791 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1792 sc->msk_hw_id, sc->msk_hw_rev);
1793 mtx_destroy(&sc->msk_mtx);
1797 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1798 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1799 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1800 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1801 "max number of Rx events to process");
1803 sc->msk_process_limit = MSK_PROC_DEFAULT;
1804 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1805 "process_limit", &sc->msk_process_limit);
1807 if (sc->msk_process_limit < MSK_PROC_MIN ||
1808 sc->msk_process_limit > MSK_PROC_MAX) {
1809 device_printf(dev, "process_limit value out of range; "
1810 "using default: %d\n", MSK_PROC_DEFAULT);
1811 sc->msk_process_limit = MSK_PROC_DEFAULT;
1815 sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
1816 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1817 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1818 "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
1819 "Maximum number of time to delay interrupts");
1820 resource_int_value(device_get_name(dev), device_get_unit(dev),
1821 "int_holdoff", &sc->msk_int_holdoff);
1823 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1824 /* Check number of MACs. */
1825 sc->msk_num_port = 1;
1826 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1828 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1832 /* Check bus type. */
1833 if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, ®) == 0) {
1834 sc->msk_bustype = MSK_PEX_BUS;
1835 sc->msk_expcap = reg;
1836 } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, ®) == 0) {
1837 sc->msk_bustype = MSK_PCIX_BUS;
1838 sc->msk_pcixcap = reg;
1840 sc->msk_bustype = MSK_PCI_BUS;
1842 switch (sc->msk_hw_id) {
1843 case CHIP_ID_YUKON_EC:
1844 sc->msk_clock = 125; /* 125 MHz */
1845 sc->msk_pflags |= MSK_FLAG_JUMBO;
1847 case CHIP_ID_YUKON_EC_U:
1848 sc->msk_clock = 125; /* 125 MHz */
1849 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1851 case CHIP_ID_YUKON_EX:
1852 sc->msk_clock = 125; /* 125 MHz */
1853 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1854 MSK_FLAG_AUTOTX_CSUM;
1856 * Yukon Extreme seems to have silicon bug for
1857 * automatic Tx checksum calculation capability.
1859 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
1860 sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
1862 * Yukon Extreme A0 could not use store-and-forward
1863 * for jumbo frames, so disable Tx checksum
1864 * offloading for jumbo frames.
1866 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
1867 sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
1869 case CHIP_ID_YUKON_FE:
1870 sc->msk_clock = 100; /* 100 MHz */
1871 sc->msk_pflags |= MSK_FLAG_FASTETHER;
1873 case CHIP_ID_YUKON_FE_P:
1874 sc->msk_clock = 50; /* 50 MHz */
1875 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
1876 MSK_FLAG_AUTOTX_CSUM;
1877 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1880 * FE+ A0 has status LE writeback bug so msk(4)
1881 * does not rely on status word of received frame
1882 * in msk_rxeof() which in turn disables all
1883 * hardware assistance bits reported by the status
1884 * word as well as validity of the received frame.
1885 * Just pass received frames to upper stack with
1886 * minimal test and let upper stack handle them.
1888 sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
1889 MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
1892 case CHIP_ID_YUKON_XL:
1893 sc->msk_clock = 156; /* 156 MHz */
1894 sc->msk_pflags |= MSK_FLAG_JUMBO;
1896 case CHIP_ID_YUKON_SUPR:
1897 sc->msk_clock = 125; /* 125 MHz */
1898 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1899 MSK_FLAG_AUTOTX_CSUM;
1901 case CHIP_ID_YUKON_UL_2:
1902 sc->msk_clock = 125; /* 125 MHz */
1903 sc->msk_pflags |= MSK_FLAG_JUMBO;
1905 case CHIP_ID_YUKON_OPT:
1906 sc->msk_clock = 125; /* 125 MHz */
1907 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
1910 sc->msk_clock = 156; /* 156 MHz */
1914 /* Allocate IRQ resources. */
1915 msic = pci_msi_count(dev);
1917 device_printf(dev, "MSI count : %d\n", msic);
1918 if (legacy_intr != 0)
1920 if (msi_disable == 0 && msic > 0) {
1922 if (pci_alloc_msi(dev, &msir) == 0) {
1924 sc->msk_pflags |= MSK_FLAG_MSI;
1925 sc->msk_irq_spec = msk_irq_spec_msi;
1927 pci_release_msi(dev);
1931 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1933 device_printf(dev, "couldn't allocate IRQ resources\n");
1937 if ((error = msk_status_dma_alloc(sc)) != 0)
1940 /* Set base interrupt mask. */
1941 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1942 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1943 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1945 /* Reset the adapter. */
1948 if ((error = mskc_setup_rambuffer(sc)) != 0)
	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
	mmd->port = MSK_PORT_A;
	mmd->pmd = sc->msk_pmd;
	mmd->mii_flags |= MIIF_DOPAUSE;
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		mmd->mii_flags |= MIIF_HAVEFIBER;
	if (sc->msk_pmd == 'P')
		mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
		    M_ZERO);
		mmd->port = MSK_PORT_B;
		mmd->pmd = sc->msk_pmd;
		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
			mmd->mii_flags |= MIIF_HAVEFIBER;
		if (sc->msk_pmd == 'P')
			mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
	}
	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);
	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		/* XXX */
		sc_if->msk_flags |= MSK_FLAG_DETACH;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->msk_tick_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 *	device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */

	msk_rx_dma_jfree(sc_if);
	msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);

	sc = sc_if->msk_softc;
	sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	return (0);
}
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->msk_devs[MSK_PORT_A] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
		if (sc->msk_devs[MSK_PORT_B] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}
2083 /* Disable all interrupts. */
2084 CSR_WRITE_4(sc, B0_IMSK, 0);
2085 CSR_READ_4(sc, B0_IMSK);
2086 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2087 CSR_READ_4(sc, B0_HWE_IMSK);
	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	msk_status_dma_free(sc);
	if (sc->msk_intrhand) {
		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
		sc->msk_intrhand = NULL;
	}
	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}
static bus_dma_tag_t
mskc_get_dma_tag(device_t bus, device_t child __unused)
{

	return (bus_get_dma_tag(bus));
}

struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
	ctx->msk_busaddr = segs[0].ds_addr;
}
/* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	bus_size_t stat_sz;
	int count, error;

	/*
	 * The controller seems to require that the number of status LEs
	 * be a power of 2, with a maximum of 4096 entries. For dual-port
	 * controllers, the status ring must be large enough to hold
	 * events from both ports.
	 */
	count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
	count = imin(4096, roundup2(count, 1024));
	sc->msk_stat_count = count;
	stat_sz = count * sizeof(struct msk_stat_desc);
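	/*
	 * Worked example (assuming the stock ring sizes MSK_RX_RING_CNT =
	 * 512 and MSK_TX_RING_CNT = 512): count = 3 * 512 + 512 = 2048;
	 * roundup2(2048, 1024) leaves 2048, which is already a power of 2
	 * and below the 4096 cap, so the status ring gets 2048 LEs and
	 * stat_sz = 2048 * sizeof(struct msk_stat_desc).
	 */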
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->msk_dev),	/* parent */
	    MSK_STAT_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    stat_sz,				/* maxsize */
	    1,					/* nsegments */
	    stat_sz,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
	    sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		return (error);
	}
	sc->msk_stat_ring_paddr = ctx.msk_busaddr;

	return (0);
}

static void
msk_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		if (sc->msk_stat_ring_paddr) {
			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
			sc->msk_stat_ring_paddr = 0;
		}
		if (sc->msk_stat_ring) {
			bus_dmamem_free(sc->msk_stat_tag,
			    sc->msk_stat_ring, sc->msk_stat_map);
			sc->msk_stat_ring = NULL;
		}
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	bus_size_t rxalign;
	int error, i;
2221 /* Create parent DMA tag. */
2222 error = bus_dma_tag_create(
2223 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2224 1, 0, /* alignment, boundary */
2225 BUS_SPACE_MAXADDR, /* lowaddr */
2226 BUS_SPACE_MAXADDR, /* highaddr */
2227 NULL, NULL, /* filter, filterarg */
2228 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2230 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2232 NULL, NULL, /* lockfunc, lockarg */
2233 &sc_if->msk_cdata.msk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
2239 /* Create tag for Tx ring. */
2240 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2241 MSK_RING_ALIGN, 0, /* alignment, boundary */
2242 BUS_SPACE_MAXADDR, /* lowaddr */
2243 BUS_SPACE_MAXADDR, /* highaddr */
2244 NULL, NULL, /* filter, filterarg */
2245 MSK_TX_RING_SZ, /* maxsize */
2247 MSK_TX_RING_SZ, /* maxsegsize */
2249 NULL, NULL, /* lockfunc, lockarg */
2250 &sc_if->msk_cdata.msk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}
2257 /* Create tag for Rx ring. */
2258 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2259 MSK_RING_ALIGN, 0, /* alignment, boundary */
2260 BUS_SPACE_MAXADDR, /* lowaddr */
2261 BUS_SPACE_MAXADDR, /* highaddr */
2262 NULL, NULL, /* filter, filterarg */
2263 MSK_RX_RING_SZ, /* maxsize */
2265 MSK_RX_RING_SZ, /* maxsegsize */
2267 NULL, NULL, /* lockfunc, lockarg */
2268 &sc_if->msk_cdata.msk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}
2275 /* Create tag for Tx buffers. */
2276 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2277 1, 0, /* alignment, boundary */
2278 BUS_SPACE_MAXADDR, /* lowaddr */
2279 BUS_SPACE_MAXADDR, /* highaddr */
2280 NULL, NULL, /* filter, filterarg */
2281 MSK_TSO_MAXSIZE, /* maxsize */
2282 MSK_MAXTXSEGS, /* nsegments */
2283 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2285 NULL, NULL, /* lockfunc, lockarg */
2286 &sc_if->msk_cdata.msk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		goto fail;
	}
	rxalign = 1;
	/*
	 * Work around a hardware hang that seems to happen when an Rx
	 * buffer is not aligned on a multiple of the FIFO word size
	 * (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
2300 /* Create tag for Rx buffers. */
2301 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2302 rxalign, 0, /* alignment, boundary */
2303 BUS_SPACE_MAXADDR, /* lowaddr */
2304 BUS_SPACE_MAXADDR, /* highaddr */
2305 NULL, NULL, /* filter, filterarg */
2306 MCLBYTES, /* maxsize */
2308 MCLBYTES, /* maxsegsize */
2310 NULL, NULL, /* lockfunc, lockarg */
2311 &sc_if->msk_cdata.msk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}
2328 ctx.msk_busaddr = 0;
2329 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2330 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2331 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2339 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2340 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2341 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2342 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}
2349 ctx.msk_busaddr = 0;
2350 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2351 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2352 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static int
msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_rxdesc *jrxd;
	bus_size_t rxalign;
	int error, i;

	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
		device_printf(sc_if->msk_if_dev,
		    "disabling jumbo frame support\n");
		return (0);
	}
2411 /* Create tag for jumbo Rx ring. */
2412 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2413 MSK_RING_ALIGN, 0, /* alignment, boundary */
2414 BUS_SPACE_MAXADDR, /* lowaddr */
2415 BUS_SPACE_MAXADDR, /* highaddr */
2416 NULL, NULL, /* filter, filterarg */
2417 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2419 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2421 NULL, NULL, /* lockfunc, lockarg */
2422 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}
	rxalign = 1;
	/*
	 * Work around a hardware hang that seems to happen when an Rx
	 * buffer is not aligned on a multiple of the FIFO word size
	 * (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
2436 /* Create tag for jumbo Rx buffers. */
2437 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2438 rxalign, 0, /* alignment, boundary */
2439 BUS_SPACE_MAXADDR, /* lowaddr */
2440 BUS_SPACE_MAXADDR, /* highaddr */
2441 NULL, NULL, /* filter, filterarg */
2442 MJUM9BYTES, /* maxsize */
2444 MJUM9BYTES, /* maxsegsize */
2446 NULL, NULL, /* lockfunc, lockarg */
2447 &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}
	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
2465 ctx.msk_busaddr = 0;
2466 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2467 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2468 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2469 msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}

	return (0);

jumbo_fail:
	msk_rx_dma_jfree(sc_if);
	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
	    "due to resource shortage\n");
	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
	return (0);
}
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	int i;

	/* Tx ring. */
2515 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2516 if (sc_if->msk_rdata.msk_tx_ring_paddr)
2517 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2518 sc_if->msk_cdata.msk_tx_ring_map);
2519 if (sc_if->msk_rdata.msk_tx_ring)
2520 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2521 sc_if->msk_rdata.msk_tx_ring,
2522 sc_if->msk_cdata.msk_tx_ring_map);
2523 sc_if->msk_rdata.msk_tx_ring = NULL;
2524 sc_if->msk_rdata.msk_tx_ring_paddr = 0;
2525 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2526 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2529 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2530 if (sc_if->msk_rdata.msk_rx_ring_paddr)
2531 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2532 sc_if->msk_cdata.msk_rx_ring_map);
2533 if (sc_if->msk_rdata.msk_rx_ring)
2534 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2535 sc_if->msk_rdata.msk_rx_ring,
2536 sc_if->msk_cdata.msk_rx_ring_map);
2537 sc_if->msk_rdata.msk_rx_ring = NULL;
2538 sc_if->msk_rdata.msk_rx_ring_paddr = 0;
2539 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2540 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2543 if (sc_if->msk_cdata.msk_tx_tag) {
2544 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2545 txd = &sc_if->msk_cdata.msk_txdesc[i];
2546 if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
2552 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2553 sc_if->msk_cdata.msk_tx_tag = NULL;
2556 if (sc_if->msk_cdata.msk_rx_tag) {
2557 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2558 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2559 if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
2565 if (sc_if->msk_cdata.msk_rx_sparemap) {
2566 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2567 sc_if->msk_cdata.msk_rx_sparemap);
2568 sc_if->msk_cdata.msk_rx_sparemap = 0;
2570 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2571 sc_if->msk_cdata.msk_rx_tag = NULL;
	if (sc_if->msk_cdata.msk_parent_tag) {
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
		sc_if->msk_cdata.msk_parent_tag = NULL;
	}
}
static void
msk_rx_dma_jfree(struct msk_if_softc *sc_if)
{
	struct msk_rxdesc *jrxd;
	int i;
2585 /* Jumbo Rx ring. */
2586 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2587 if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr)
2588 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2589 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2590 if (sc_if->msk_rdata.msk_jumbo_rx_ring)
2591 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2592 sc_if->msk_rdata.msk_jumbo_rx_ring,
2593 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2594 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2595 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0;
2596 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2597 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2599 /* Jumbo Rx buffers. */
2600 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2601 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2602 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
2610 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2611 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2612 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2613 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}
}
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, csum, prod, si;
	uint16_t offset, tcp_offset, tso_mtu;
	int error, i, nseg, tso;

	MSK_IF_LOCK_ASSERT(sc_if);

	tcp_offset = offset = 0;
	m = *m_head;
2636 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2637 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2638 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2639 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers here
		 * to set up TSO and checksum offload. I don't know why
		 * Marvell made such a decision in the chip design, because
		 * other GigE controllers normally take care of these chores
		 * in hardware. However, TSO performance of the Yukon II is
		 * good enough that it is worth implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
			offset += (tcp->th_off << 2);
2696 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2697 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2698 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			/*
			 * It seems that the Yukon II has a Tx checksum
			 * offload bug for small TCP packets of less than
			 * 60 bytes in size (e.g. TCP window probe packets,
			 * pure ACK packets). A common workaround such as
			 * padding with zeros to reach the minimum Ethernet
			 * frame size did not work at all.
			 * Instead of disabling checksum offload completely,
			 * we resort to a S/W checksum routine when we
			 * encounter short TCP frames.
			 * Short UDP packets appear to be handled correctly
			 * by the Yukon II. I also assume this bug does not
			 * occur on controllers that use the newer descriptor
			 * format or automatic Tx checksum calculation.
			 */
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
			    m->m_pkthdr.len, offset);
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}
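	/*
	 * Explanatory note (not in the original source): for CSUM_TCP,
	 * m->m_pkthdr.csum_data holds the offset of the checksum field
	 * within the TCP header (offsetof(struct tcphdr, th_sum) == 16),
	 * so the store above patches th_sum in place with the software
	 * checksum computed from 'offset' to the end of the packet.
	 */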
	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (0);
	}
	/* Check number of available descriptors. */
	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		return (ENOBUFS);
	}

	control = 0;
	tso = 0;
	tx_le = NULL;

	/* Check TSO support. */
2768 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2769 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
			tso_mtu = m->m_pkthdr.tso_segsz;
		else
			tso_mtu = offset + m->m_pkthdr.tso_segsz;
2773 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2774 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2775 tx_le->msk_addr = htole32(tso_mtu);
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
			else
				tx_le->msk_control =
				    htole32(OP_LRGLEN | HW_OWNER);
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
		}
		tso = 1;
	}
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		if (tx_le == NULL) {
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr = htole32(0);
			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
			    htons(m->m_pkthdr.ether_vtag));
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
		} else
			tx_le->msk_control |= htole32(OP_VLAN |
			    htons(m->m_pkthdr.ether_vtag));
		control |= INS_VLAN;
	}
	/* Check if we have to handle checksum offload. */
	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
			control |= CALSUM;
		else {
			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
				control |= UDPTCP;
			/* Checksum write position. */
			csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
			/* Checksum start position. */
			csum |= (uint32_t)tcp_offset << 16;
			if (csum != sc_if->msk_cdata.msk_last_csum) {
				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
				tx_le->msk_addr = htole32(csum);
				tx_le->msk_control = htole32(1 << 16 |
				    (OP_TCPLISW | HW_OWNER));
				sc_if->msk_cdata.msk_tx_cnt++;
				MSK_INC(prod, MSK_TX_RING_CNT);
				sc_if->msk_cdata.msk_last_csum = csum;
			}
		}
	}

	si = prod;
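	/*
	 * Worked example of the checksum LE encoding above (assuming an
	 * untagged IPv4/TCP frame with no IP options): tcp_offset =
	 * 14 + 20 = 34 and csum_data = 16 (offset of th_sum), so csum =
	 * (34 << 16) | 50 = 0x00220032 -- start summing at byte 34 and
	 * write the result at byte 50. The LE is only (re)written when
	 * this value differs from the previous frame's.
	 */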
2826 #ifdef MSK_64BIT_DMA
2827 if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
2828 sc_if->msk_cdata.msk_tx_high_addr) {
2829 sc_if->msk_cdata.msk_tx_high_addr =
2830 MSK_ADDR_HI(txsegs[0].ds_addr);
2831 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2832 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
2833 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2834 sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
#endif
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	if (tso == 0)
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_PACKET);
	else
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_LARGESEND);
	sc_if->msk_cdata.msk_tx_cnt++;
2848 MSK_INC(prod, MSK_TX_RING_CNT);
2850 for (i = 1; i < nseg; i++) {
2851 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2852 #ifdef MSK_64BIT_DMA
2853 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
2854 sc_if->msk_cdata.msk_tx_high_addr) {
2855 sc_if->msk_cdata.msk_tx_high_addr =
2856 MSK_ADDR_HI(txsegs[i].ds_addr);
2857 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr =
			    htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
2860 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2861 sc_if->msk_cdata.msk_tx_cnt++;
2862 MSK_INC(prod, MSK_TX_RING_CNT);
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		}
#endif
2866 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2867 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2868 OP_BUFFER | HW_OWNER);
2869 sc_if->msk_cdata.msk_tx_cnt++;
2870 MSK_INC(prod, MSK_TX_RING_CNT);
2872 /* Update producer index. */
2873 sc_if->msk_cdata.msk_tx_prod = prod;
2875 /* Set EOP on the last descriptor. */
2876 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2877 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2878 tx_le->msk_control |= htole32(EOP);
2880 /* Turn the first descriptor ownership to hardware. */
2881 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2882 tx_le->msk_control |= htole32(HW_OWNER);
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;
	/* Sync descriptors. */
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
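/*
 * Summary (explanatory note, not in the original source): msk_encap()
 * emits, in order, any OP_MSS/OP_LRGLEN, OP_VLAN and OP_TCPLISW option
 * LEs, then one buffer LE per DMA segment (with OP_ADDR64 LEs interleaved
 * on 64-bit DMA whenever the high 32 address bits change); the last
 * buffer LE is tagged EOP, and ownership of the very first LE is handed
 * to hardware only after everything else has been written.
 */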
static void
msk_start(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	msk_start_locked(ifp);
	MSK_IF_UNLOCK(sc_if);
}
static void
msk_start_locked(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK_ASSERT(sc_if);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->msk_cdata.msk_tx_cnt <
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
	}
}
static void
msk_watchdog(struct msk_if_softc *sc_if)
{
	struct ifnet *ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
		return;
	ifp = sc_if->msk_ifp;
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
		if (bootverbose)
			if_printf(sc_if->msk_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		msk_init_locked(sc_if);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	msk_init_locked(sc_if);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		msk_start_locked(ifp);
}
static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);
	MSK_LOCK(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
		    IFF_DRV_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}
	MSK_UNLOCK(sc);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	return (0);
}
static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);

	MSK_LOCK(sc);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
		    IFF_DRV_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	msk_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_pflags |= MSK_FLAG_SUSPEND;

	MSK_UNLOCK(sc);

	return (0);
}
static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);

	MSK_LOCK(sc);

	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
	mskc_reset(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
			sc->msk_if[i]->msk_ifp->if_drv_flags &=
			    ~IFF_DRV_RUNNING;
			msk_init_locked(sc->msk_if[i]);
		}
	}
	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;

	MSK_UNLOCK(sc);

	return (0);
}
3072 #ifndef __NO_STRICT_ALIGNMENT
static __inline void
msk_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (MSK_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
}
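/*
 * Explanatory note (not in the original source): the chip requires Rx
 * buffers aligned to MSK_RX_BUF_ALIGN (8 bytes), so the 14-byte Ethernet
 * header would leave the IP header misaligned on strict-alignment
 * machines. msk_fixup_rx() slides the frame back by MSK_RX_BUF_ALIGN -
 * ETHER_ALIGN = 8 - 2 = 6 bytes, 16 bits at a time, leaving the payload
 * ETHER_ALIGN (2) bytes into the buffer so the IP header lands on a
 * 4-byte boundary.
 */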
static __inline void
msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;
3098 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
3099 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3100 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3101 if ((control & CSS_IPV4_CSUM_OK) != 0)
3102 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3103 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3104 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3105 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}

	/*
	 * Marvell Yukon controllers that support OP_RXCHKS are known to
	 * have various Rx checksum offloading bugs. These controllers
	 * can be configured to compute a simple checksum at two
	 * different positions, so we can compute the IP and TCP/UDP
	 * checksums at the same time. We intentionally have the
	 * controller compute the TCP/UDP checksum twice by specifying
	 * the same checksum start position, and compare the results. If
	 * they differ, the hardware logic was faulty.
	 */
	if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
		if (bootverbose)
			device_printf(sc_if->msk_if_dev,
			    "Rx checksum value mismatch!\n");
		return;
	}
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	csum = bswap16(sc_if->msk_csum & 0xFFFF);
	/* Checksum fixup for IP options. */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
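	/*
	 * Worked example of one round of the fold above, with hypothetical
	 * values: csum = 0x0001 and *opts = 0x0003 give temp32 = -2 =
	 * 0xfffffffe; the arithmetic shift contributes -1, the low half
	 * contributes 0xfffe, and csum becomes 0xfffd -- exactly 1 - 3 in
	 * ones-complement arithmetic, removing the IP option words from
	 * the hardware-computed sum.
	 */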
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
			/*
			 * For controllers that return a bogus status code,
			 * just do a minimal check and let the upper stack
			 * handle the frame.
			 */
			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				msk_discard_rxbuf(sc_if, cons);
				break;
			}
		} else if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
#ifdef MSK_64BIT_DMA
		rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
		    MSK_RX_RING_CNT];
#else
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
#endif
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3238 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3239 msk_rxcsum(sc_if, control, m);
3240 /* Check for VLAN tagged packets. */
3241 if ((status & GMR_FS_VLAN) != 0 &&
3242 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3243 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3244 m->m_flags |= M_VLANTAG;
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}
static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
#ifdef MSK_64BIT_DMA
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
		    MSK_JUMBO_RX_RING_CNT];
#else
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
#endif
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3303 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3304 msk_rxcsum(sc_if, control, m);
3305 /* Check for VLAN tagged packets. */
3306 if ((status & GMR_FS_VLAN) != 0 &&
3307 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3308 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3309 m->m_flags |= M_VLANTAG;
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t control;
	int cons, prog;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our Tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	prog = 0;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		prog++;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc_if->msk_cdata.msk_tx_cons = cons;
		if (sc_if->msk_cdata.msk_tx_cnt == 0)
			sc_if->msk_watchdog_timer = 0;
		/* No need to sync LEs as we didn't update LEs. */
	}
}
static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = xsc_if;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		msk_miibus_statchg(sc_if->msk_if_dev);
	msk_handle_events(sc_if->msk_softc);
	msk_watchdog(sc_if);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	/* Handle FIFO Underrun/Overflow? */
	if ((status & PHY_M_IS_FIFO_ERROR))
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
}
static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of a Tx underrun, we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with status LEs. Reinitializing status LEs would affect
		 * the other port in dual-MAC configurations, so it should
		 * be avoided as much as possible. Due to a lack of
		 * documentation this is all guesswork; it needs more
		 * investigation.
		 */
	}
}
static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
static void
msk_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec. This error is also mapped to either the
		 * Master Abort (Y2_IS_MST_ERR) or Target Abort
		 * (Y2_IS_IRQ_STAT) bit and can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}
	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
3505 /* Reset all bits in the PCI status register. */
3506 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3507 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3508 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3509 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3510 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
3511 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On the PCI Express bus, bridges are called root
		 * complexes (RC). PCI Express errors are recognized by
		 * the root complex too, which requests the system to
		 * handle the problem. After the error occurs, it may be
		 * that no access to the adapter can be performed any
		 * longer.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			device_printf(sc->msk_dev,
			    "Uncorrectable PCI Express error\n");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header from Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
3553 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3554 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_rx_ring_tag,
		    sc_if->msk_cdata.msk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}
static int
msk_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, len, port, rxprog;

	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
		return (0);
	/* Sync status LEs. */
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
	rxprog = 0;
	cons = sc->msk_stat_cons;
	for (;;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
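		/*
		 * Explanatory note (not in the original source): each status
		 * LE packs the opcode and HW_OWNER into the top bits of
		 * 'control', the originating port into bit 16, and a 16-bit
		 * length (or a VLAN tag / Tx index bits, depending on the
		 * opcode) into the low bits; 'status' carries the
		 * opcode-specific payload such as the Rx status word or the
		 * checksum pair.
		 */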
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			continue;
		}
		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKS:
			sc_if->msk_csum = status;
			break;
		case OP_RXSTAT:
			if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
				break;
			if (sc_if->msk_framesize >
			    (MCLBYTES - MSK_RX_BUF_ALIGN))
				msk_jumbo_rxeof(sc_if, status, control, len);
			else
				msk_rxeof(sc_if, status, control, len);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * put the DMA sync operation off until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				    STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				    STLE_TXA2_SHIFTH));
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, sc->msk_stat_count);
		if (rxprog > sc->msk_process_limit)
			break;
	}

	sc->msk_stat_cons = cons;
3668 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3669 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3671 if (rxput[MSK_PORT_A] > 0)
3672 msk_rxput(sc->msk_if[MSK_PORT_A]);
3673 if (rxput[MSK_PORT_B] > 0)
3674 msk_rxput(sc->msk_if[MSK_PORT_B]);
	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}
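/*
 * Explanatory note (not in the original source): the return value tells
 * msk_intr() whether the status unit produced more LEs while we were
 * draining; only when it is zero may Y2_IS_STAT_BMU be cleared without
 * risking lost events.
 */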
static void
msk_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	struct ifnet *ifp0, *ifp1;
	uint32_t status;
	int domore;

	sc = xsc;
	MSK_LOCK(sc);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		MSK_UNLOCK(sc);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;
3709 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3710 msk_intr_phy(sc_if0);
3711 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3712 msk_intr_phy(sc_if1);
3713 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3714 msk_intr_gmac(sc_if0);
3715 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3716 msk_intr_gmac(sc_if1);
3717 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3718 device_printf(sc->msk_dev, "Rx descriptor error\n");
3719 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3720 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3721 CSR_READ_4(sc, B0_IMSK);
3723 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3724 device_printf(sc->msk_dev, "Tx descriptor error\n");
3725 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3726 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3727 CSR_READ_4(sc, B0_IMSK);
	if ((status & Y2_IS_HW_ERR) != 0)
		msk_intr_hwerr(sc);

	domore = msk_handle_events(sc);
	if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
		msk_start_locked(ifp0);
	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
		msk_start_locked(ifp1);

	MSK_UNLOCK(sc);
}
static void
msk_set_tx_stfwd(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_ENA);
	} else {
		if (ifp->if_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(sc,
			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_DIS);
		} else {
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_ENA);
		}
	}
}

static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc_if = xsc;

	MSK_IF_LOCK(sc_if);
	msk_init_locked(sc_if);
	MSK_IF_UNLOCK(sc_if);
}
static void
msk_init_locked(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t *eaddr;
	uint16_t gmac;
	uint32_t reg;
	int error;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	mii = device_get_softc(sc_if->msk_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	error = 0;
	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	if (ifp->if_mtu < ETHERMTU)
		sc_if->msk_framesize = ETHERMTU;
	else
		sc_if->msk_framesize = ifp->if_mtu;
	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	if (ifp->if_mtu > ETHERMTU &&
	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
	}
3823 /* GMAC Control reset. */
3824 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3825 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3826 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3827 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3828 sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
3829 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);

	/*
	 * Initialize GMAC first so that speed, duplex and flow-control
	 * parameters are renegotiated when the interface is brought up.
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3839 /* Dummy read the Interrupt Source Register. */
3840 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3842 /* Clear MIB stats. */
3843 msk_stats_clear(sc_if);
3846 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3848 /* Setup Transmit Control Register. */
3849 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3851 /* Setup Transmit Flow Control Register. */
3852 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3854 /* Setup Transmit Parameter Register. */
3855 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3856 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3857 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3859 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3860 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3862 if (ifp->if_mtu > ETHERMTU)
3863 gmac |= GM_SMOD_JUMBO_ENA;
3864 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3866 /* Set station address. */
3867 eaddr = IF_LLADDR(ifp);
3868 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3869 eaddr[0] | (eaddr[1] << 8));
3870 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3871 eaddr[2] | (eaddr[3] << 8));
3872 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3873 eaddr[4] | (eaddr[5] << 8));
3874 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3875 eaddr[0] | (eaddr[1] << 8));
3876 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3877 eaddr[2] | (eaddr[3] << 8));
3878 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3879 eaddr[4] | (eaddr[5] << 8));
3881 /* Disable interrupts for counter overflows. */
3882 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3883 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3884 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3886 /* Configure Rx MAC FIFO. */
3887 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3888 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3889 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3890 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3891 sc->msk_hw_id == CHIP_ID_YUKON_EX)
3892 reg |= GMF_RX_OVER_ON;
3893 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3895 /* Set receive filter. */
3896 msk_rxfilter(sc_if);
3898 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3899 /* Clear flush mask - HW bug. */
3900 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}

	/*
	 * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word,
	 * due to a hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic value for Yukon FE+ - from Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3918 /* Configure Tx MAC FIFO. */
3919 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3920 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3921 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3923 /* Configure hardware VLAN tag insertion/stripping. */
3924 msk_setvlan(sc_if, ifp);
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		/* Configure store-and-forward for Tx. */
		msk_set_tx_stfwd(sc_if);
	}
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
		/* Disable dynamic watermark - from Linux. */
		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
	}
	/*
	 * Disable the Force Sync bit and Alloc bit in the Tx RAM
	 * interface arbiter, as we don't use the Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3950 /* Enable the RAM Interface Arbiter. */
3951 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3953 /* Setup RAM buffer. */
3954 msk_set_rambuffer(sc_if);
3956 /* Disable Tx sync Queue. */
3957 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3959 /* Setup Tx Queue Bus Memory Interface. */
3960 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3961 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3962 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3963 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3964 switch (sc->msk_hw_id) {
3965 case CHIP_ID_YUKON_EC_U:
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level. */
			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
			    MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme B0 seems to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
			    F_TX_CHK_AUTO_OFF);
		break;
	}
3983 /* Setup Rx Queue Bus Memory Interface. */
3984 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3985 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3986 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3987 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}
3994 msk_set_prefetch(sc, sc_if->msk_txq,
3995 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3996 msk_init_tx_ring(sc_if);
	/*
	 * Disable the RSS hash, and enable Rx checksum offloading only on
	 * controllers that use the old descriptor format with RXCSUM
	 * enabled.
	 */
	reg = BMU_DIS_RX_RSS_HASH;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
		reg |= BMU_ENA_RX_CHKSUM;
	else
		reg |= BMU_DIS_RX_CHKSUM;
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		/* Disable flushing of non-ASF packets. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_RX_MACSEC_FLUSH_OFF);
	}
4030 /* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
4038 /* Configure IRQ moderation mask. */
4039 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
4040 if (sc->msk_int_holdoff > 0) {
4041 /* Configure initial IRQ moderation timer value. */
4042 CSR_WRITE_4(sc, B2_IRQM_INI,
4043 MSK_USECS(sc, sc->msk_int_holdoff));
4044 CSR_WRITE_4(sc, B2_IRQM_VAL,
4045 MSK_USECS(sc, sc->msk_int_holdoff));
4046 /* Start IRQ moderation. */
		CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
	}
4049 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4050 CSR_READ_4(sc, B0_HWE_IMSK);
4051 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4052 CSR_READ_4(sc, B0_IMSK);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	mii_mediachg(mii);

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;
	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	/* Compute flow-control pause thresholds, in 8-byte units. */
	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
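	/*
	 * A rough worked example with assumed numbers (the real
	 * qstart/qend values come from the RAM buffer partitioning done
	 * at attach time): for a 64KB Rx queue with qstart = 0 and
	 * qend = 0xffff, and MSK_RB_ULPP taken as 8KB, utpp becomes
	 * (0x10000 - 8192) / 8 = 7168, i.e. the upper pause threshold
	 * sits 8KB below the end of the queue, expressed in the 8-byte
	 * units the RAM buffer registers use.
	 */
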
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for the Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure the write completed. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct ifnet *ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	/* Update stats and clear counters. */
	msk_stats_update(sc_if);

	/* Stop the Tx BMU and poll until it is idle. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset the Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet, and since we cannot guarantee that no
	 * data is incoming, we must reset the BMU while it is not in the
	 * middle of a DMA transfer. Because the Rx path may still be
	 * active, the Rx RAM buffer is stopped first, so any incoming data
	 * cannot trigger a new DMA. Once the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress has ended, and only then
	 * is it reset.
	 */
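	/*
	 * The poll below compares the RAM buffer's read shadow level
	 * (Q_RSL) with its read level (Q_RL); when the two match, no
	 * transfer should be in flight and the BMU can be reset safely.
	 * (The register semantics here are inferred from this usage.)
	 */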
	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset the Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}

/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the low
 * 16 bits of a counter clears its high 16 bits, so the low word must
 * be read last.
 */
#define MSK_READ_MIB32(x, y)					\
	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y)
#define MSK_READ_MIB64(x, y)					\
	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y)
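
/*
 * For illustration, MSK_READ_MIB32(port, GM_RXF_UC_OK) expands to a
 * read of the high word at GM_RXF_UC_OK + 4 followed by a read of the
 * low word at GM_RXF_UC_OK, honoring the ordering rule above; the
 * 64-bit variant stacks two such 32-bit reads placed 8 bytes apart.
 */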

static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}

static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	/* Spare counters are read, presumably just to clear them. */
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runt +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}

static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}
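
/*
 * Both stat handlers (32- and 64-bit) report the sum of the value
 * accumulated in msk_stats by msk_stats_update(), which reads the MIB
 * counters in clear-on-read mode, and whatever the live counter has
 * gathered since that last update; neither value alone is complete.
 */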

static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_64(oidp, &result, 0, req));
}

#undef MSK_READ_MIB32
#undef MSK_READ_MIB64

#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "QU", d)
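
/*
 * For example, MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", child,
 * rx_crc_errs, "CRC errors") registers a read-only unsigned integer
 * proc node whose arg2 is the byte offset of rx_crc_errs within
 * struct msk_hw_stats; msk_sysctl_stat32() then maps that offset onto
 * the matching hardware MIB counter.
 */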

static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64-byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "Frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64-byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}

#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
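
/*
 * The registered counters show up under the per-device sysctl tree,
 * e.g. dev.msk.0.stats.rx.ucast_frames or dev.msk.0.stats.tx.colls for
 * unit 0 (assuming the usual dev.<driver>.<unit> layout provided by
 * device_get_sysctl_tree()).
 */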

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
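
/*
 * sysctl_int_range() is the generic range-checked integer handler; the
 * wrapper above simply pins the acceptable values to the MSK_PROC_MIN..
 * MSK_PROC_MAX window. It is assumed to back a tunable such as the
 * driver's Rx event process limit, registered elsewhere in this file
 * via SYSCTL_ADD_PROC.
 */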