/******************************************************************************
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *****************************************************************************/
11 /******************************************************************************
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
46 *****************************************************************************/
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
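
/*
 * These are loader(8) tunables read at boot time; for example, in
 * /boot/loader.conf:
 *
 *	hw.msk.msi_disable=1	(use legacy INTx interrupts instead of MSI)
 *	hw.msk.legacy_intr=1	(interrupt handler without the taskqueue)
 *	hw.msk.jumbo_disable=1	(skip jumbo frame Rx resource allocation)
 */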
#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/*
 * Devices supported by this driver.
 */
static struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};
static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_legacy_intr(void *);
static int msk_intr(void *);
static void msk_int_task(void *, int);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_tx_task(void *, int);
static void msk_start(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);
static void msk_link_task(void *, int);

static void msk_setmulti(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void msk_setpromisc(struct msk_if_softc *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi2[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE },
	{ -1, 0, 0 }
};
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
}
static void
msk_link_task(void *arg, int pending)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = (struct msk_if_softc *)arg;
	sc = sc_if->msk_softc;

	MSK_IF_LOCK(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc_if->msk_link = 1;
	} else
		sc_if->msk_link = 0;

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}

	MSK_IF_UNLOCK(sc_if);
}
static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffff;
			mchash[1] = 0xffff;
		}
	} else {
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
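			/*
			 * Worked example: if the CRC's low six bits were,
			 * say, 0x29, this sets bit (0x29 & 0x1f) = 9 of
			 * word (0x29 >> 5) = 1, i.e. mchash[1] |= 1 << 9.
			 */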
		}
		mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		/* Enable hardware VLAN tag insertion/stripping. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		/* Disable hardware VLAN tag insertion/stripping. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}
static void
msk_setpromisc(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	uint16_t mode;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
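	/*
	 * The descriptor prefetch unit fetches list elements up to the
	 * put index on its own, so writing the index below is what hands
	 * the freshly initialized ring over to the hardware.
	 */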
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
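	/*
	 * Note the spare-map dance: the new mbuf was loaded into
	 * msk_rx_sparemap first, so a failed load above left the old
	 * mbuf and its mapping intact; only on success are the maps
	 * swapped and the old map recycled as the new spare.
	 */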
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (0);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);
	MSK_IF_UNLOCK(sc_if);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((sc_if->msk_flags & MSK_FLAG_NOJUMBO) != 0 &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				ifp->if_mtu = ifr->ifr_mtu;
				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
					msk_init_locked(sc_if);
			}
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & IFF_PROMISC) != 0) {
					msk_setpromisc(sc_if);
					msk_setmulti(sc_if);
				}
			} else {
				if (sc_if->msk_detach == 0)
					msk_init_locked(sc_if);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_setmulti(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}
		if ((mask & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (ifp->if_mtu > ETHERMTU &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}
		VLAN_CAPABILITIES(ifp);
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
static int
mskc_probe(device_t dev)
{
	struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024.  The Tx/Rx RAM buffer size of Yukon II should be a
	 * multiple of 1KB.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
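	/*
	 * Worked example: a 48KB SRAM yields rounddown(48 * 1024 * 2 / 3,
	 * 1024) = 32768 bytes (32KB) for the Rx queue and the remaining
	 * 16384 bytes (16KB) for the Tx queue of each port.
	 */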
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	}
}
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		int pcix;

		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width)
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
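	/*
	 * MSK_USECS() scales microseconds by the core clock frequency
	 * (sc->msk_clock, in MHz), so on a 125 MHz Yukon EC the 1000 us
	 * programmed above corresponds to 125000 clock ticks.
	 */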
	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
	msk_sysctl_node(sc_if);

	/* Disable jumbo frame for Yukon FE. */
	if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE)
		sc_if->msk_flags |= MSK_FLAG_NOJUMBO;

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware. It seems there is a workaround to make it work sometimes.
	 * However, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct. Sometimes it should compute
	 * the IP/TCP/UDP checksum in the driver in order to verify the
	 * correctness of the checksum computed by the hardware. If you have
	 * to compute the checksum in software to verify the hardware's
	 * checksum, why have the hardware compute it at all? I think there
	 * is no reason to spend time making Rx checksum offload work on
	 * Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/*
	 * VLAN capability setup
	 * Due to Tx checksum offload hardware bugs, msk(4) manually
	 * computes the checksum for short frames. For VLAN tagged frames
	 * this workaround does not work, so disable checksum offload
	 * for VLAN interfaces.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
	    msk_mediastatus);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, msic, msir, *port, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	/*
	 * The Yukon II reports it can handle two messages, one for each
	 * possible port. We go ahead and allocate two messages and only
	 * setup a handler for both if we have a dual port card.
	 *
	 * XXX: I haven't untangled the interrupt handler to handle dual
	 * port cards with separate MSI messages, so for now I disable MSI
	 * on dual port cards.
	 */
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0) {
		switch (msic) {
		case 2:
		case 1: /* 88E8058 reports 1 MSI message */
			msir = msic;
			if (sc->msk_num_port == 1 &&
			    pci_alloc_msi(dev, &msir) == 0) {
				if (msic == msir) {
					sc->msk_pflags |= MSK_FLAG_MSI;
					sc->msk_irq_spec = msic == 2 ?
					    msk_irq_spec_msi2 :
					    msk_irq_spec_msi;
				} else
					pci_release_msi(dev);
			}
			break;
		default:
			device_printf(dev,
			    "Unexpected number of MSI messages : %d\n", msic);
			break;
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	if (legacy_intr != 0)
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, NULL, msk_legacy_intr, sc,
		    &sc->msk_intrhand[0]);
	else {
		TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
		sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->msk_tq);
		taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
		    device_get_nameunit(sc->msk_dev));
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
	}

	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		if (legacy_intr == 0)
			taskqueue_free(sc->msk_tq);
		goto fail;
	}

fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);

	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		sc_if->msk_detach = 1;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->msk_tick_ch);
		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
		taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
		ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 *	device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */

	msk_rx_dma_jfree(sc_if);
	msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);

	if (ifp)
		if_free(ifp);
	sc = sc_if->msk_softc;
	sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	return (0);
}
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->msk_devs[MSK_PORT_A] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
		if (sc->msk_devs[MSK_PORT_B] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	msk_status_dma_free(sc);

	if (legacy_intr == 0 && sc->msk_tq != NULL) {
		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
		taskqueue_free(sc->msk_tq);
		sc->msk_tq = NULL;
	}
	if (sc->msk_intrhand[0]) {
		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
		sc->msk_intrhand[0] = NULL;
	}
	if (sc->msk_intrhand[1]) {
		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
		sc->msk_intrhand[1] = NULL;
	}
	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}
struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->msk_busaddr = segs[0].ds_addr;
}
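
/*
 * Since the rings below are allocated with bus_dmamem_alloc(), the
 * bus_dmamap_load() calls complete synchronously and the callback above
 * just records the single segment's bus address for the caller.
 */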
/* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->msk_dev),	/* parent */
	    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_STAT_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_STAT_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc->msk_stat_tag,
	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		return (error);
	}
	sc->msk_stat_ring_paddr = ctx.msk_busaddr;

	return (0);
}

static void
msk_status_dma_free(struct msk_softc *sc)
{

	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		if (sc->msk_stat_map) {
			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
			if (sc->msk_stat_ring) {
				bus_dmamem_free(sc->msk_stat_tag,
				    sc->msk_stat_ring, sc->msk_stat_map);
				sc->msk_stat_ring = NULL;
			}
			sc->msk_stat_map = NULL;
		}
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
1989 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1991 struct msk_dmamap_arg ctx;
1992 struct msk_txdesc *txd;
1993 struct msk_rxdesc *rxd;
1997 /* Create parent DMA tag. */
2000 * It seems that Yukon II supports full 64bits DMA operations. But
2001 * it needs two descriptors(list elements) for 64bits DMA operations.
2002 * Since we don't know what DMA address mappings(32bits or 64bits)
2003 * would be used in advance for each mbufs, we limits its DMA space
2004 * to be in range of 32bits address space. Otherwise, we should check
2005 * what DMA address is used and chain another descriptor for the
2006 * 64bits DMA operation. This also means descriptor ring size is
2007 * variable. Limiting DMA address to be in 32bit address space greatly
2008 * simplyfies descriptor handling and possibly would increase
2009 * performance a bit due to efficient handling of descriptors.
2010 * Apart from harassing checksum offloading mechanisms, it seems
2011 * it's really bad idea to use a seperate descriptor for 64bit
2012 * DMA operation to save small descriptor memory. Anyway, I've
2013 * never seen these exotic scheme on ethernet interface hardware.
2015 error = bus_dma_tag_create(
2016 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2017 1, 0, /* alignment, boundary */
2018 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2019 BUS_SPACE_MAXADDR, /* highaddr */
2020 NULL, NULL, /* filter, filterarg */
2021 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2023 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2025 NULL, NULL, /* lockfunc, lockarg */
2026 &sc_if->msk_cdata.msk_parent_tag);
2028 device_printf(sc_if->msk_if_dev,
2029 "failed to create parent DMA tag\n");
2032 /* Create tag for Tx ring. */
2033 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2034 MSK_RING_ALIGN, 0, /* alignment, boundary */
2035 BUS_SPACE_MAXADDR, /* lowaddr */
2036 BUS_SPACE_MAXADDR, /* highaddr */
2037 NULL, NULL, /* filter, filterarg */
2038 MSK_TX_RING_SZ, /* maxsize */
2040 MSK_TX_RING_SZ, /* maxsegsize */
2042 NULL, NULL, /* lockfunc, lockarg */
2043 &sc_if->msk_cdata.msk_tx_ring_tag);
2045 device_printf(sc_if->msk_if_dev,
2046 "failed to create Tx ring DMA tag\n");
2050 /* Create tag for Rx ring. */
2051 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2052 MSK_RING_ALIGN, 0, /* alignment, boundary */
2053 BUS_SPACE_MAXADDR, /* lowaddr */
2054 BUS_SPACE_MAXADDR, /* highaddr */
2055 NULL, NULL, /* filter, filterarg */
2056 MSK_RX_RING_SZ, /* maxsize */
2058 MSK_RX_RING_SZ, /* maxsegsize */
2060 NULL, NULL, /* lockfunc, lockarg */
2061 &sc_if->msk_cdata.msk_rx_ring_tag);
2063 device_printf(sc_if->msk_if_dev,
2064 "failed to create Rx ring DMA tag\n");
2068 /* Create tag for Tx buffers. */
2069 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2070 1, 0, /* alignment, boundary */
2071 BUS_SPACE_MAXADDR, /* lowaddr */
2072 BUS_SPACE_MAXADDR, /* highaddr */
2073 NULL, NULL, /* filter, filterarg */
2074 MSK_TSO_MAXSIZE, /* maxsize */
2075 MSK_MAXTXSEGS, /* nsegments */
2076 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2078 NULL, NULL, /* lockfunc, lockarg */
2079 &sc_if->msk_cdata.msk_tx_tag);
2081 device_printf(sc_if->msk_if_dev,
2082 "failed to create Tx DMA tag\n");
2088 * Work around a hardware hang that seems to happen when the Rx
2089 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2091 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2092 rxalign = MSK_RX_BUF_ALIGN;
2093 /* Create tag for Rx buffers. */
2094 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2095 rxalign, 0, /* alignment, boundary */
2096 BUS_SPACE_MAXADDR, /* lowaddr */
2097 BUS_SPACE_MAXADDR, /* highaddr */
2098 NULL, NULL, /* filter, filterarg */
2099 MCLBYTES, /* maxsize */
2101 MCLBYTES, /* maxsegsize */
2103 NULL, NULL, /* lockfunc, lockarg */
2104 &sc_if->msk_cdata.msk_rx_tag);
2106 device_printf(sc_if->msk_if_dev,
2107 "failed to create Rx DMA tag\n");
2111 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2112 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2113 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2114 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2116 device_printf(sc_if->msk_if_dev,
2117 "failed to allocate DMA'able memory for Tx ring\n");
2121 ctx.msk_busaddr = 0;
2122 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2123 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2124 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2126 device_printf(sc_if->msk_if_dev,
2127 "failed to load DMA'able memory for Tx ring\n");
2130 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2132 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2133 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2134 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2135 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2137 device_printf(sc_if->msk_if_dev,
2138 "failed to allocate DMA'able memory for Rx ring\n");
2142 ctx.msk_busaddr = 0;
2143 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2144 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2145 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2147 device_printf(sc_if->msk_if_dev,
2148 "failed to load DMA'able memory for Rx ring\n");
2151 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2153 /* Create DMA maps for Tx buffers. */
2154 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2155 txd = &sc_if->msk_cdata.msk_txdesc[i];
2157 txd->tx_dmamap = NULL;
2158 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2161 device_printf(sc_if->msk_if_dev,
2162 "failed to create Tx dmamap\n");
2166 /* Create DMA maps for Rx buffers. */
2167 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2168 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2169 device_printf(sc_if->msk_if_dev,
2170 "failed to create spare Rx dmamap\n");
2173 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2174 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2176 rxd->rx_dmamap = NULL;
2177 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2180 device_printf(sc_if->msk_if_dev,
2181 "failed to create Rx dmamap\n");
2191 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2193 struct msk_dmamap_arg ctx;
2194 struct msk_rxdesc *jrxd;
2198 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_NOJUMBO) != 0) {
2199 sc_if->msk_flags |= MSK_FLAG_NOJUMBO;
2200 device_printf(sc_if->msk_if_dev,
2201 "disabling jumbo frame support\n");
2205 /* Create tag for jumbo Rx ring. */
2206 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2207 MSK_RING_ALIGN, 0, /* alignment, boundary */
2208 BUS_SPACE_MAXADDR, /* lowaddr */
2209 BUS_SPACE_MAXADDR, /* highaddr */
2210 NULL, NULL, /* filter, filterarg */
2211 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2213 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2215 NULL, NULL, /* lockfunc, lockarg */
2216 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2218 device_printf(sc_if->msk_if_dev,
2219 "failed to create jumbo Rx ring DMA tag\n");
2225 * Work around a hardware hang that seems to happen when the Rx
2226 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2228 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2229 rxalign = MSK_RX_BUF_ALIGN;
2230 /* Create tag for jumbo Rx buffers. */
2231 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2232 rxalign, 0, /* alignment, boundary */
2233 BUS_SPACE_MAXADDR, /* lowaddr */
2234 BUS_SPACE_MAXADDR, /* highaddr */
2235 NULL, NULL, /* filter, filterarg */
2236 MJUM9BYTES, /* maxsize */
2238 MJUM9BYTES, /* maxsegsize */
2240 NULL, NULL, /* lockfunc, lockarg */
2241 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2243 device_printf(sc_if->msk_if_dev,
2244 "failed to create jumbo Rx DMA tag\n");
2248 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2249 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2250 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2251 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2252 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2254 device_printf(sc_if->msk_if_dev,
2255 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2259 ctx.msk_busaddr = 0;
2260 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2261 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2262 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2263 msk_dmamap_cb, &ctx, 0);
2265 device_printf(sc_if->msk_if_dev,
2266 "failed to load DMA'able memory for jumbo Rx ring\n");
2269 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2271 /* Create DMA maps for jumbo Rx buffers. */
2272 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2273 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2274 device_printf(sc_if->msk_if_dev,
2275 "failed to create spare jumbo Rx dmamap\n");
2278 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2279 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2281 jrxd->rx_dmamap = NULL;
2282 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2285 device_printf(sc_if->msk_if_dev,
2286 "failed to create jumbo Rx dmamap\n");
2294 msk_rx_dma_jfree(sc_if);
2295 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2296 "due to resource shortage\n");
2297 sc_if->msk_flags |= MSK_FLAG_NOJUMBO;
2302 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2304 struct msk_txdesc *txd;
2305 struct msk_rxdesc *rxd;
2309 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2310 if (sc_if->msk_cdata.msk_tx_ring_map)
2311 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2312 sc_if->msk_cdata.msk_tx_ring_map);
2313 if (sc_if->msk_cdata.msk_tx_ring_map &&
2314 sc_if->msk_rdata.msk_tx_ring)
2315 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2316 sc_if->msk_rdata.msk_tx_ring,
2317 sc_if->msk_cdata.msk_tx_ring_map);
2318 sc_if->msk_rdata.msk_tx_ring = NULL;
2319 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2320 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2321 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2324 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2325 if (sc_if->msk_cdata.msk_rx_ring_map)
2326 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2327 sc_if->msk_cdata.msk_rx_ring_map);
2328 if (sc_if->msk_cdata.msk_rx_ring_map &&
2329 sc_if->msk_rdata.msk_rx_ring)
2330 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2331 sc_if->msk_rdata.msk_rx_ring,
2332 sc_if->msk_cdata.msk_rx_ring_map);
2333 sc_if->msk_rdata.msk_rx_ring = NULL;
2334 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2335 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2336 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2339 if (sc_if->msk_cdata.msk_tx_tag) {
2340 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2341 txd = &sc_if->msk_cdata.msk_txdesc[i];
2342 if (txd->tx_dmamap) {
2343 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2345 txd->tx_dmamap = NULL;
2348 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2349 sc_if->msk_cdata.msk_tx_tag = NULL;
2352 if (sc_if->msk_cdata.msk_rx_tag) {
2353 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2354 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2355 if (rxd->rx_dmamap) {
2356 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2358 rxd->rx_dmamap = NULL;
2361 if (sc_if->msk_cdata.msk_rx_sparemap) {
2362 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2363 sc_if->msk_cdata.msk_rx_sparemap);
2364 sc_if->msk_cdata.msk_rx_sparemap = 0;
2366 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2367 sc_if->msk_cdata.msk_rx_tag = NULL;
2369 if (sc_if->msk_cdata.msk_parent_tag) {
2370 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2371 sc_if->msk_cdata.msk_parent_tag = NULL;
2376 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2378 struct msk_rxdesc *jrxd;
2381 /* Jumbo Rx ring. */
2382 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2383 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2384 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2385 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2386 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2387 sc_if->msk_rdata.msk_jumbo_rx_ring)
2388 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2389 sc_if->msk_rdata.msk_jumbo_rx_ring,
2390 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2391 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2392 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2393 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2394 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2396 /* Jumbo Rx buffers. */
2397 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2398 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2399 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2400 if (jrxd->rx_dmamap) {
2402 sc_if->msk_cdata.msk_jumbo_rx_tag,
2404 jrxd->rx_dmamap = NULL;
2407 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2408 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2409 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2410 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2412 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2413 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2418 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2420 struct msk_txdesc *txd, *txd_last;
2421 struct msk_tx_desc *tx_le;
2424 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2425 uint32_t control, prod, si;
2426 uint16_t offset, tcp_offset, tso_mtu;
2427 int error, i, nseg, tso;
2429 MSK_IF_LOCK_ASSERT(sc_if);
2431 tcp_offset = offset = 0;
2433 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2435 * Since the mbuf carries no protocol specific structure
2436 * information, we have to inspect the protocol headers here to
2437 * set up TSO and checksum offload. I don't know why Marvell
2438 * made such a decision in the chip design, because other GigE
2439 * hardware normally takes care of all these chores in
2440 * hardware. However, TSO performance of the Yukon II is good
2441 * enough that it is worth implementing.
2443 struct ether_header *eh;
2447 if (M_WRITABLE(m) == 0) {
2448 /* Get a writable copy. */
2449 m = m_dup(*m_head, M_DONTWAIT);
2458 offset = sizeof(struct ether_header);
2459 m = m_pullup(m, offset);
2464 eh = mtod(m, struct ether_header *);
2465 /* Check if hardware VLAN insertion is off. */
2466 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2467 offset = sizeof(struct ether_vlan_header);
2468 m = m_pullup(m, offset);
2474 m = m_pullup(m, offset + sizeof(struct ip));
2479 ip = (struct ip *)(mtod(m, char *) + offset);
2480 offset += (ip->ip_hl << 2);
2481 tcp_offset = offset;
2483 * It seems that the Yukon II has a Tx checksum offload bug for
2484 * small TCP packets of less than 60 bytes in size
2485 * (e.g. TCP window probe packets, pure ACK packets).
2486 * The common workaround of padding the frame with zeros up to the
2487 * minimum ethernet frame size didn't work at all.
2488 * Instead of disabling checksum offload completely, we
2489 * resort to a S/W checksum routine when we encounter short TCP frames.
2491 * Short UDP packets appear to be handled correctly by the hardware.
2494 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2495 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2496 m = m_pullup(m, offset + sizeof(struct tcphdr));
2501 *(uint16_t *)(m->m_data + offset +
2502 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2503 m->m_pkthdr.len, offset);
2504 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
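/*
 * The checksum has just been computed in software; clearing CSUM_TCP
 * above keeps the checksum offload LE from being queued for this
 * frame further below.
 */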
2506 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2507 m = m_pullup(m, offset + sizeof(struct tcphdr));
2512 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2513 offset += (tcp->th_off << 2);
2518 prod = sc_if->msk_cdata.msk_tx_prod;
2519 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2521 map = txd->tx_dmamap;
2522 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2523 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2524 if (error == EFBIG) {
2525 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2532 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2533 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2539 } else if (error != 0)
2547 /* Check number of available descriptors. */
2548 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2549 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2550 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2558 /* Check TSO support. */
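/*
 * The chip retains the most recently programmed MSS, so an OP_LRGLEN
 * LE is queued only when the value (header length plus tso_segsz)
 * differs from the cached msk_tso_mtu.
 */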
2559 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2560 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2561 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2562 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2563 tx_le->msk_addr = htole32(tso_mtu);
2564 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2565 sc_if->msk_cdata.msk_tx_cnt++;
2566 MSK_INC(prod, MSK_TX_RING_CNT);
2567 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2571 /* Check if we have a VLAN tag to insert. */
2572 if ((m->m_flags & M_VLANTAG) != 0) {
2574 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2575 tx_le->msk_addr = htole32(0);
2576 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2577 htons(m->m_pkthdr.ether_vtag));
2578 sc_if->msk_cdata.msk_tx_cnt++;
2579 MSK_INC(prod, MSK_TX_RING_CNT);
2581 tx_le->msk_control |= htole32(OP_VLAN |
2582 htons(m->m_pkthdr.ether_vtag));
2584 control |= INS_VLAN;
2586 /* Check if we have to handle checksum offload. */
2587 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2588 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2589 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2590 & 0xffff) | ((uint32_t)tcp_offset << 16));
2591 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2592 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2593 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2595 sc_if->msk_cdata.msk_tx_cnt++;
2596 MSK_INC(prod, MSK_TX_RING_CNT);
2600 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2601 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2603 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2606 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2608 sc_if->msk_cdata.msk_tx_cnt++;
2609 MSK_INC(prod, MSK_TX_RING_CNT);
2611 for (i = 1; i < nseg; i++) {
2612 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2613 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2614 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2615 OP_BUFFER | HW_OWNER);
2616 sc_if->msk_cdata.msk_tx_cnt++;
2617 MSK_INC(prod, MSK_TX_RING_CNT);
2619 /* Update producer index. */
2620 sc_if->msk_cdata.msk_tx_prod = prod;
2622 /* Set EOP on the last descriptor. */
2623 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2624 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2625 tx_le->msk_control |= htole32(EOP);
2627 /* Hand ownership of the first descriptor to the hardware. */
2628 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2629 tx_le->msk_control |= htole32(HW_OWNER);
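/*
 * Note the ordering above: every LE after the first is created with
 * HW_OWNER already set, EOP is set on the last LE, and only then is
 * HW_OWNER turned on for the first LE, so the DMA engine can never
 * pick up a partially built chain.
 */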
2631 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2632 map = txd_last->tx_dmamap;
2633 txd_last->tx_dmamap = txd->tx_dmamap;
2634 txd->tx_dmamap = map;
2637 /* Sync descriptors. */
2638 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2639 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2640 sc_if->msk_cdata.msk_tx_ring_map,
2641 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2647 msk_tx_task(void *arg, int pending)
2656 msk_start(struct ifnet *ifp)
2658 struct msk_if_softc *sc_if;
2659 struct mbuf *m_head;
2662 sc_if = ifp->if_softc;
2666 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2667 IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2668 MSK_IF_UNLOCK(sc_if);
2672 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2673 sc_if->msk_cdata.msk_tx_cnt <
2674 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2675 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2679 * Pack the data into the transmit ring. If we
2680 * don't have room, set the OACTIVE flag and wait
2681 * for the NIC to drain the ring.
2683 if (msk_encap(sc_if, &m_head) != 0) {
2686 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2687 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2693 * If there's a BPF listener, bounce a copy of this frame to it.
2696 ETHER_BPF_MTAP(ifp, m_head);
2701 CSR_WRITE_2(sc_if->msk_softc,
2702 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2703 sc_if->msk_cdata.msk_tx_prod);
2705 /* Set a timeout in case the chip goes out to lunch. */
2706 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2709 MSK_IF_UNLOCK(sc_if);
2713 msk_watchdog(struct msk_if_softc *sc_if)
2719 MSK_IF_LOCK_ASSERT(sc_if);
2721 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2723 ifp = sc_if->msk_ifp;
2724 if (sc_if->msk_link == 0) {
2726 if_printf(sc_if->msk_ifp, "watchdog timeout "
2729 msk_init_locked(sc_if);
2734 * Reclaim first, as there is a possibility of losing Tx completion interrupts.
2737 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2738 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2739 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2740 msk_txeof(sc_if, idx);
2741 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2742 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2744 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2745 taskqueue_enqueue(taskqueue_fast,
2746 &sc_if->msk_tx_task);
2751 if_printf(ifp, "watchdog timeout\n");
2753 msk_init_locked(sc_if);
2754 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2755 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2759 mskc_shutdown(device_t dev)
2761 struct msk_softc *sc;
2764 sc = device_get_softc(dev);
2766 for (i = 0; i < sc->msk_num_port; i++) {
2767 if (sc->msk_if[i] != NULL)
2768 msk_stop(sc->msk_if[i]);
2771 /* Disable all interrupts. */
2772 CSR_WRITE_4(sc, B0_IMSK, 0);
2773 CSR_READ_4(sc, B0_IMSK);
2774 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2775 CSR_READ_4(sc, B0_HWE_IMSK);
2777 /* Put the hardware into reset. */
2778 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2785 mskc_suspend(device_t dev)
2787 struct msk_softc *sc;
2790 sc = device_get_softc(dev);
2794 for (i = 0; i < sc->msk_num_port; i++) {
2795 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2796 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2797 IFF_DRV_RUNNING) != 0))
2798 msk_stop(sc->msk_if[i]);
2801 /* Disable all interrupts. */
2802 CSR_WRITE_4(sc, B0_IMSK, 0);
2803 CSR_READ_4(sc, B0_IMSK);
2804 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2805 CSR_READ_4(sc, B0_HWE_IMSK);
2807 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2809 /* Put the hardware into reset. */
2810 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2811 sc->msk_suspended = 1;
2819 mskc_resume(device_t dev)
2821 struct msk_softc *sc;
2824 sc = device_get_softc(dev);
2829 for (i = 0; i < sc->msk_num_port; i++) {
2830 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2831 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2832 msk_init_locked(sc->msk_if[i]);
2834 sc->msk_suspended = 0;
2841 #ifndef __NO_STRICT_ALIGNMENT
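/*
 * On strict-alignment architectures the chip's Rx buffers must start
 * on an 8-byte boundary, which leaves the IP header misaligned.  Fix
 * this up by sliding the received frame back by
 * (MSK_RX_BUF_ALIGN - ETHER_ALIGN) bytes, one 16-bit word at a time,
 * so that the payload ends up properly aligned for the stack.
 */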
2842 static __inline void
2843 msk_fixup_rx(struct mbuf *m)
2846 uint16_t *src, *dst;
2848 src = mtod(m, uint16_t *);
2851 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2854 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2859 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2863 struct msk_rxdesc *rxd;
2866 ifp = sc_if->msk_ifp;
2868 MSK_IF_LOCK_ASSERT(sc_if);
2870 cons = sc_if->msk_cdata.msk_rx_cons;
2872 rxlen = status >> 16;
2873 if ((status & GMR_FS_VLAN) != 0 &&
2874 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2875 rxlen -= ETHER_VLAN_ENCAP_LEN;
2876 if (len > sc_if->msk_framesize ||
2877 ((status & GMR_FS_ANY_ERR) != 0) ||
2878 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2879 /* Don't count flow-control packets as errors. */
2880 if ((status & GMR_FS_GOOD_FC) == 0)
2882 msk_discard_rxbuf(sc_if, cons);
2885 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2887 if (msk_newbuf(sc_if, cons) != 0) {
2889 /* Reuse old buffer. */
2890 msk_discard_rxbuf(sc_if, cons);
2893 m->m_pkthdr.rcvif = ifp;
2894 m->m_pkthdr.len = m->m_len = len;
2895 #ifndef __NO_STRICT_ALIGNMENT
2896 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2900 /* Check for VLAN tagged packets. */
2901 if ((status & GMR_FS_VLAN) != 0 &&
2902 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2903 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2904 m->m_flags |= M_VLANTAG;
2906 MSK_IF_UNLOCK(sc_if);
2907 (*ifp->if_input)(ifp, m);
2911 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2912 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
2916 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2920 struct msk_rxdesc *jrxd;
2923 ifp = sc_if->msk_ifp;
2925 MSK_IF_LOCK_ASSERT(sc_if);
2927 cons = sc_if->msk_cdata.msk_rx_cons;
2929 rxlen = status >> 16;
2930 if ((status & GMR_FS_VLAN) != 0 &&
2931 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2932 rxlen -= ETHER_VLAN_ENCAP_LEN;
2933 if (len > sc_if->msk_framesize ||
2934 ((status & GMR_FS_ANY_ERR) != 0) ||
2935 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2936 /* Don't count flow-control packets as errors. */
2937 if ((status & GMR_FS_GOOD_FC) == 0)
2939 msk_discard_jumbo_rxbuf(sc_if, cons);
2942 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2944 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2946 /* Reuse old buffer. */
2947 msk_discard_jumbo_rxbuf(sc_if, cons);
2950 m->m_pkthdr.rcvif = ifp;
2951 m->m_pkthdr.len = m->m_len = len;
2952 #ifndef __NO_STRICT_ALIGNMENT
2953 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2957 /* Check for VLAN tagged packets. */
2958 if ((status & GMR_FS_VLAN) != 0 &&
2959 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2960 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2961 m->m_flags |= M_VLANTAG;
2963 MSK_IF_UNLOCK(sc_if);
2964 (*ifp->if_input)(ifp, m);
2968 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2969 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2973 msk_txeof(struct msk_if_softc *sc_if, int idx)
2975 struct msk_txdesc *txd;
2976 struct msk_tx_desc *cur_tx;
2981 MSK_IF_LOCK_ASSERT(sc_if);
2983 ifp = sc_if->msk_ifp;
2985 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2986 sc_if->msk_cdata.msk_tx_ring_map,
2987 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2989 * Go through our tx ring and free mbufs for those
2990 * frames that have been sent.
2992 cons = sc_if->msk_cdata.msk_tx_cons;
2994 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2995 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2998 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2999 control = le32toh(cur_tx->msk_control);
3000 sc_if->msk_cdata.msk_tx_cnt--;
3001 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3002 if ((control & EOP) == 0)
3004 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3005 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3006 BUS_DMASYNC_POSTWRITE);
3007 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3010 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3017 sc_if->msk_cdata.msk_tx_cons = cons;
3018 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3019 sc_if->msk_watchdog_timer = 0;
3020 /* No need to sync the LEs, as we didn't update them. */
3025 msk_tick(void *xsc_if)
3027 struct msk_if_softc *sc_if;
3028 struct mii_data *mii;
3032 MSK_IF_LOCK_ASSERT(sc_if);
3034 mii = device_get_softc(sc_if->msk_miibus);
3037 msk_watchdog(sc_if);
3038 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3042 msk_intr_phy(struct msk_if_softc *sc_if)
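/*
 * The PHY interrupt status register is deliberately read twice; the
 * first read appears to acknowledge the latched interrupt.  This is
 * an educated guess, as no datasheet is available.
 */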
3046 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3047 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3048 /* Handle FIFO Underrun/Overflow? */
3049 if ((status & PHY_M_IS_FIFO_ERROR))
3050 device_printf(sc_if->msk_if_dev,
3051 "PHY FIFO underrun/overflow.\n");
3055 msk_intr_gmac(struct msk_if_softc *sc_if)
3057 struct msk_softc *sc;
3060 sc = sc_if->msk_softc;
3061 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3063 /* GMAC Rx FIFO overrun. */
3064 if ((status & GM_IS_RX_FF_OR) != 0) {
3065 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3067 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3069 /* GMAC Tx FIFO underrun. */
3070 if ((status & GM_IS_TX_FF_UR) != 0) {
3071 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3073 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3076 * In case of a Tx underrun, we may need to flush/reset the
3077 * Tx MAC, but that would also require resynchronization
3078 * with the status LEs. Reinitializing the status LEs would
3079 * affect the other port in a dual MAC configuration, so it
3080 * should be avoided as much as we can.
3081 * Due to the lack of documentation this is all vague
3082 * guesswork, and it needs more investigation.
3088 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3090 struct msk_softc *sc;
3092 sc = sc_if->msk_softc;
3093 if ((status & Y2_IS_PAR_RD1) != 0) {
3094 device_printf(sc_if->msk_if_dev,
3095 "RAM buffer read parity error\n");
3097 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3100 if ((status & Y2_IS_PAR_WR1) != 0) {
3101 device_printf(sc_if->msk_if_dev,
3102 "RAM buffer write parity error\n");
3104 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3107 if ((status & Y2_IS_PAR_MAC1) != 0) {
3108 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3110 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3113 if ((status & Y2_IS_PAR_RX1) != 0) {
3114 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3116 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3118 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3119 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3121 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3126 msk_intr_hwerr(struct msk_softc *sc)
3129 uint32_t tlphead[4];
3131 status = CSR_READ_4(sc, B0_HWE_ISRC);
3132 /* Time Stamp timer overflow. */
3133 if ((status & Y2_IS_TIST_OV) != 0)
3134 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3135 if ((status & Y2_IS_PCI_NEXP) != 0) {
3137 * A PCI Express error occurred which is not described in the PEX spec.
3139 * This error is also mapped to either the Master Abort
3140 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3141 * can only be cleared there.
3143 device_printf(sc->msk_dev,
3144 "PCI Express protocol violation error\n");
3147 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3150 if ((status & Y2_IS_MST_ERR) != 0)
3151 device_printf(sc->msk_dev,
3152 "unexpected IRQ Master error\n");
3154 device_printf(sc->msk_dev,
3155 "unexpected IRQ Status error\n");
3156 /* Reset all bits in the PCI status register. */
3157 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3158 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3159 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3160 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3161 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3162 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3165 /* Check for PCI Express Uncorrectable Error. */
3166 if ((status & Y2_IS_PCI_EXP) != 0) {
3170 * On a PCI Express bus, bridges are called root complexes (RC).
3171 * PCI Express errors are recognized by the root complex too,
3172 * which requests the system to handle the problem. After the
3173 * error has occurred, it may no longer be possible to access
3174 * the adapter.
3177 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3178 if ((v32 & PEX_UNSUP_REQ) != 0) {
3179 /* Ignore unsupported request error. */
3180 device_printf(sc->msk_dev,
3181 "Uncorrectable PCI Express error\n");
3183 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3186 /* Get the TLP header from the Log Registers. */
3187 for (i = 0; i < 4; i++)
3188 tlphead[i] = CSR_PCI_READ_4(sc,
3189 PEX_HEADER_LOG + i * 4);
3190 /* Check for vendor defined broadcast message. */
3191 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3192 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3193 CSR_WRITE_4(sc, B0_HWE_IMSK,
3194 sc->msk_intrhwemask);
3195 CSR_READ_4(sc, B0_HWE_IMSK);
3198 /* Clear the interrupt. */
3199 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3200 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3201 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3204 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3205 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3206 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3207 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3210 static __inline void
3211 msk_rxput(struct msk_if_softc *sc_if)
3213 struct msk_softc *sc;
3215 sc = sc_if->msk_softc;
3216 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3218 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3219 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3220 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3223 sc_if->msk_cdata.msk_rx_ring_tag,
3224 sc_if->msk_cdata.msk_rx_ring_map,
3225 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3226 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3227 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
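/*
 * Drain the status LE ring written by the chip and dispatch each event
 * (Rx frame, VLAN tag, Tx completion) to the owning port.  Returns
 * nonzero when more status LEs arrived while we were processing (or
 * the processing limit was hit), so callers loop until the ring is
 * quiet.
 */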
3231 msk_handle_events(struct msk_softc *sc)
3233 struct msk_if_softc *sc_if;
3235 struct msk_stat_desc *sd;
3236 uint32_t control, status;
3237 int cons, idx, len, port, rxprog;
3239 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3240 if (idx == sc->msk_stat_cons)
3243 /* Sync status LEs. */
3244 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3245 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3246 /* XXX Sync Rx LEs here. */
3248 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3251 for (cons = sc->msk_stat_cons; cons != idx;) {
3252 sd = &sc->msk_stat_ring[cons];
3253 control = le32toh(sd->msk_control);
3254 if ((control & HW_OWNER) == 0)
3257 * Marvell's FreeBSD driver updates the status LE after clearing
3258 * HW_OWNER. However, the bus_dma(9) API has no way to sync a single
3259 * LE; it only provides a way to sync an entire DMA map. So don't
3260 * sync the LE until we have a better way.
3263 control &= ~HW_OWNER;
3264 sd->msk_control = htole32(control);
3265 status = le32toh(sd->msk_status);
3266 len = control & STLE_LEN_MASK;
3267 port = (control >> 16) & 0x01;
3268 sc_if = sc->msk_if[port];
3269 if (sc_if == NULL) {
3270 device_printf(sc->msk_dev, "invalid port opcode "
3271 "0x%08x\n", control & STLE_OP_MASK);
3275 switch (control & STLE_OP_MASK) {
3277 sc_if->msk_vtag = ntohs(len);
3280 sc_if->msk_vtag = ntohs(len);
3283 if (sc_if->msk_framesize >
3284 (MCLBYTES - MSK_RX_BUF_ALIGN))
3285 msk_jumbo_rxeof(sc_if, status, len);
3287 msk_rxeof(sc_if, status, len);
3290 * Because there is no way to sync a single Rx LE, put
3291 * the DMA sync operation off until the end of the event loop.
3295 /* Update the prefetch unit if we've passed the watermark. */
3296 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3302 if (sc->msk_if[MSK_PORT_A] != NULL)
3303 msk_txeof(sc->msk_if[MSK_PORT_A],
3304 status & STLE_TXA1_MSKL);
3305 if (sc->msk_if[MSK_PORT_B] != NULL)
3306 msk_txeof(sc->msk_if[MSK_PORT_B],
3307 ((status & STLE_TXA2_MSKL) >>
3309 ((len & STLE_TXA2_MSKH) <<
3313 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3314 control & STLE_OP_MASK);
3317 MSK_INC(cons, MSK_STAT_RING_CNT);
3318 if (rxprog > sc->msk_process_limit)
3322 sc->msk_stat_cons = cons;
3323 /* XXX We should sync status LEs here. See above notes. */
3325 if (rxput[MSK_PORT_A] > 0)
3326 msk_rxput(sc->msk_if[MSK_PORT_A]);
3327 if (rxput[MSK_PORT_B] > 0)
3328 msk_rxput(sc->msk_if[MSK_PORT_B]);
3330 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3333 /* Legacy interrupt handler for shared interrupt. */
3335 msk_legacy_intr(void *xsc)
3337 struct msk_softc *sc;
3338 struct msk_if_softc *sc_if0, *sc_if1;
3339 struct ifnet *ifp0, *ifp1;
3345 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3346 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3347 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3348 (status & sc->msk_intrmask) == 0) {
3349 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3353 sc_if0 = sc->msk_if[MSK_PORT_A];
3354 sc_if1 = sc->msk_if[MSK_PORT_B];
3357 ifp0 = sc_if0->msk_ifp;
3359 ifp1 = sc_if1->msk_ifp;
3361 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3362 msk_intr_phy(sc_if0);
3363 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3364 msk_intr_phy(sc_if1);
3365 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3366 msk_intr_gmac(sc_if0);
3367 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3368 msk_intr_gmac(sc_if1);
3369 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3370 device_printf(sc->msk_dev, "Rx descriptor error\n");
3371 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3372 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3373 CSR_READ_4(sc, B0_IMSK);
3375 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3376 device_printf(sc->msk_dev, "Tx descriptor error\n");
3377 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3378 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3379 CSR_READ_4(sc, B0_IMSK);
3381 if ((status & Y2_IS_HW_ERR) != 0)
3384 while (msk_handle_events(sc) != 0)
3386 if ((status & Y2_IS_STAT_BMU) != 0)
3387 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3389 /* Reenable interrupts. */
3390 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3392 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3393 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3394 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3395 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3396 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3397 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3405 struct msk_softc *sc;
3409 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3410 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3411 if (status == 0 || status == 0xffffffff) {
3412 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3413 return (FILTER_STRAY);
3416 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3417 return (FILTER_HANDLED);
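/*
 * The filter above runs in primary interrupt context and only claims
 * the interrupt; all of the real work is deferred to this taskqueue
 * handler.
 */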
3421 msk_int_task(void *arg, int pending)
3423 struct msk_softc *sc;
3424 struct msk_if_softc *sc_if0, *sc_if1;
3425 struct ifnet *ifp0, *ifp1;
3432 /* Get interrupt source. */
3433 status = CSR_READ_4(sc, B0_ISRC);
3434 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3435 (status & sc->msk_intrmask) == 0)
3438 sc_if0 = sc->msk_if[MSK_PORT_A];
3439 sc_if1 = sc->msk_if[MSK_PORT_B];
3442 ifp0 = sc_if0->msk_ifp;
3444 ifp1 = sc_if1->msk_ifp;
3446 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3447 msk_intr_phy(sc_if0);
3448 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3449 msk_intr_phy(sc_if1);
3450 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3451 msk_intr_gmac(sc_if0);
3452 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3453 msk_intr_gmac(sc_if1);
3454 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3455 device_printf(sc->msk_dev, "Rx descriptor error\n");
3456 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3457 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3458 CSR_READ_4(sc, B0_IMSK);
3460 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3461 device_printf(sc->msk_dev, "Tx descriptor error\n");
3462 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3463 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3464 CSR_READ_4(sc, B0_IMSK);
3466 if ((status & Y2_IS_HW_ERR) != 0)
3469 domore = msk_handle_events(sc);
3470 if ((status & Y2_IS_STAT_BMU) != 0)
3471 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3473 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3474 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3475 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3476 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3477 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3478 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3481 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3488 /* Reenable interrupts. */
3489 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3495 struct msk_if_softc *sc_if = xsc;
3498 msk_init_locked(sc_if);
3499 MSK_IF_UNLOCK(sc_if);
3503 msk_init_locked(struct msk_if_softc *sc_if)
3505 struct msk_softc *sc;
3507 struct mii_data *mii;
3508 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3512 MSK_IF_LOCK_ASSERT(sc_if);
3514 ifp = sc_if->msk_ifp;
3515 sc = sc_if->msk_softc;
3516 mii = device_get_softc(sc_if->msk_miibus);
3519 /* Cancel pending I/O and free all Rx/Tx buffers. */
3522 if (ifp->if_mtu < ETHERMTU)
3523 sc_if->msk_framesize = ETHERMTU;
3525 sc_if->msk_framesize = ifp->if_mtu;
3526 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3527 if (ifp->if_mtu > ETHERMTU &&
3528 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3530 * On the Yukon EC Ultra, TSO & checksum offload are not
3531 * supported for jumbo frames.
3533 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3534 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3538 * Initialize GMAC first.
3539 * Without this initialization, the Rx MAC did not work as expected;
3540 * it garbled the status LEs, which resulted in out-of-order
3541 * or duplicated frame delivery and, in turn, very poor
3542 * Rx performance. (I had to write packet analysis code that
3543 * could be embedded in the driver to diagnose this issue.)
3544 * I spent almost two months fixing this issue. If I had had a
3545 * datasheet for the Yukon II I wouldn't have encountered it. :-(
3547 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3548 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3550 /* Dummy read of the Interrupt Source Register. */
3551 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3553 /* Clear MIB stats. */
3554 msk_stats_clear(sc_if);
3557 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3559 /* Setup Transmit Control Register. */
3560 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3562 /* Setup Transmit Flow Control Register. */
3563 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3565 /* Setup Transmit Parameter Register. */
3566 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3567 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3568 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3570 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3571 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3573 if (ifp->if_mtu > ETHERMTU)
3574 gmac |= GM_SMOD_JUMBO_ENA;
3575 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3577 /* Set station address. */
3578 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3579 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3580 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3582 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3583 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3586 /* Disable interrupts for counter overflows. */
3587 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3589 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3591 /* Configure Rx MAC FIFO. */
3592 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3593 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3594 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3595 GMF_OPER_ON | GMF_RX_F_FL_ON);
3597 /* Set promiscuous mode. */
3598 msk_setpromisc(sc_if);
3600 /* Set multicast filter. */
3601 msk_setmulti(sc_if);
3603 /* Flush Rx MAC FIFO on any flow control or error. */
3604 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3608 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3609 * due to hardware hang on receipt of pause frames.
3611 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3612 RX_GMF_FL_THR_DEF + 1);
3614 /* Configure Tx MAC FIFO. */
3615 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3616 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3617 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3619 /* Configure hardware VLAN tag insertion/stripping. */
3620 msk_setvlan(sc_if, ifp);
3622 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3623 /* Set the Rx pause thresholds. */
3624 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3626 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3628 if (ifp->if_mtu > ETHERMTU) {
3630 * Set Tx GMAC FIFO Almost Empty Threshold.
3632 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3633 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3634 /* Disable Store & Forward mode for Tx. */
3635 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3636 TX_JUMBO_ENA | TX_STFW_DIS);
3638 /* Enable Store & Forward mode for Tx. */
3639 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3640 TX_JUMBO_DIS | TX_STFW_ENA);
3645 * Disable the Force Sync bit and Alloc bit in the Tx RAM interface
3646 * arbiter, as we don't use the Sync Tx queue.
3648 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3649 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3650 /* Enable the RAM Interface Arbiter. */
3651 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3653 /* Setup RAM buffer. */
3654 msk_set_rambuffer(sc_if);
3656 /* Disable Tx sync Queue. */
3657 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3659 /* Setup Tx Queue Bus Memory Interface. */
3660 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3661 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3662 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3663 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3664 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3665 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3666 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3667 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3670 /* Setup Rx Queue Bus Memory Interface. */
3671 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3672 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3673 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3674 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3675 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3676 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3677 /* MAC Rx RAM Read is controlled by hardware. */
3678 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3681 msk_set_prefetch(sc, sc_if->msk_txq,
3682 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3683 msk_init_tx_ring(sc_if);
3685 /* Disable Rx checksum offload and RSS hash. */
3686 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3687 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3688 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3689 msk_set_prefetch(sc, sc_if->msk_rxq,
3690 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3691 MSK_JUMBO_RX_RING_CNT - 1);
3692 error = msk_init_jumbo_rx_ring(sc_if);
3694 msk_set_prefetch(sc, sc_if->msk_rxq,
3695 sc_if->msk_rdata.msk_rx_ring_paddr,
3696 MSK_RX_RING_CNT - 1);
3697 error = msk_init_rx_ring(sc_if);
3700 device_printf(sc_if->msk_if_dev,
3701 "initialization failed: no memory for Rx buffers\n");
3706 /* Configure interrupt handling. */
3707 if (sc_if->msk_port == MSK_PORT_A) {
3708 sc->msk_intrmask |= Y2_IS_PORT_A;
3709 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3711 sc->msk_intrmask |= Y2_IS_PORT_B;
3712 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3714 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3715 CSR_READ_4(sc, B0_HWE_IMSK);
3716 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3717 CSR_READ_4(sc, B0_IMSK);
3719 sc_if->msk_link = 0;
3722 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3723 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3725 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3729 msk_set_rambuffer(struct msk_if_softc *sc_if)
3731 struct msk_softc *sc;
3734 sc = sc_if->msk_softc;
3735 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3738 /* Setup Rx Queue. */
3739 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3740 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3741 sc->msk_rxqstart[sc_if->msk_port] / 8);
3742 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3743 sc->msk_rxqend[sc_if->msk_port] / 8);
3744 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3745 sc->msk_rxqstart[sc_if->msk_port] / 8);
3746 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3747 sc->msk_rxqstart[sc_if->msk_port] / 8);
3749 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3750 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3751 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3752 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3753 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3754 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
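/*
 * The RAM buffer registers are programmed in units of 8 bytes
 * (FIFO qwords), which is why the queue addresses and the pause
 * thresholds computed above are all divided by 8.
 */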
3755 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3756 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3757 /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3759 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3760 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3762 /* Setup Tx Queue. */
3763 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3764 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3765 sc->msk_txqstart[sc_if->msk_port] / 8);
3766 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3767 sc->msk_txqend[sc_if->msk_port] / 8);
3768 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3769 sc->msk_txqstart[sc_if->msk_port] / 8);
3770 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3771 sc->msk_txqstart[sc_if->msk_port] / 8);
3772 /* Enable Store & Forward for Tx side. */
3773 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3774 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3775 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3779 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3783 /* Reset the prefetch unit. */
3784 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3786 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3788 /* Set LE base address. */
3789 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3791 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3793 /* Set the list last index. */
3794 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3796 /* Turn on prefetch unit. */
3797 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3799 /* Dummy read to flush the posted writes. */
3800 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3804 msk_stop(struct msk_if_softc *sc_if)
3806 struct msk_softc *sc;
3807 struct msk_txdesc *txd;
3808 struct msk_rxdesc *rxd;
3809 struct msk_rxdesc *jrxd;
3814 MSK_IF_LOCK_ASSERT(sc_if);
3815 sc = sc_if->msk_softc;
3816 ifp = sc_if->msk_ifp;
3818 callout_stop(&sc_if->msk_tick_ch);
3819 sc_if->msk_watchdog_timer = 0;
3821 /* Disable interrupts. */
3822 if (sc_if->msk_port == MSK_PORT_A) {
3823 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3824 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3826 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3827 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3829 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3830 CSR_READ_4(sc, B0_HWE_IMSK);
3831 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3832 CSR_READ_4(sc, B0_IMSK);
3834 /* Disable Tx/Rx MAC. */
3835 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3836 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3837 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3838 /* Read back to ensure the write completed. */
3839 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3840 /* Update stats and clear counters. */
3841 msk_stats_update(sc_if);
3844 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3845 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3846 for (i = 0; i < MSK_TIMEOUT; i++) {
3847 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3848 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3850 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3855 if (i == MSK_TIMEOUT)
3856 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3857 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3858 RB_RST_SET | RB_DIS_OP_MD);
3860 /* Disable all GMAC interrupts. */
3861 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3862 /* Disable PHY interrupt. */
3863 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3865 /* Disable the RAM Interface Arbiter. */
3866 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3868 /* Reset the PCI FIFO of the async Tx queue */
3869 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3870 BMU_RST_SET | BMU_FIFO_RST);
3872 /* Reset the Tx prefetch units. */
3873 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3876 /* Reset the RAM Buffer async Tx queue. */
3877 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3879 /* Reset Tx MAC FIFO. */
3880 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3881 /* Set Pause Off. */
3882 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3885 * The Rx Stop command will not work for the Yukon-2 if the BMU has
3886 * not reached the end of a packet, and since we can't make sure that
3887 * we have incoming data, we must reset the BMU while it is not in
3888 * the middle of a DMA transfer. Since it is possible that the Rx
3889 * path is still active, the Rx RAM buffer is stopped first so that
3890 * any incoming data will not trigger a DMA. After the RAM buffer is
3891 * stopped, the BMU is polled until any DMA in progress has ended, and only then is it reset.
3895 /* Disable the RAM Buffer receive queue. */
3896 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3897 for (i = 0; i < MSK_TIMEOUT; i++) {
3898 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3899 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3903 if (i == MSK_TIMEOUT)
3904 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3905 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3906 BMU_RST_SET | BMU_FIFO_RST);
3907 /* Reset the Rx prefetch unit. */
3908 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3910 /* Reset the RAM Buffer receive queue. */
3911 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3912 /* Reset Rx MAC FIFO. */
3913 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3915 /* Free Rx and Tx mbufs still in the queues. */
3916 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3917 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3918 if (rxd->rx_m != NULL) {
3919 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3920 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3921 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3927 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3928 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3929 if (jrxd->rx_m != NULL) {
3930 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3931 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3932 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3934 m_freem(jrxd->rx_m);
3938 for (i = 0; i < MSK_TX_RING_CNT; i++) {
3939 txd = &sc_if->msk_cdata.msk_txdesc[i];
3940 if (txd->tx_m != NULL) {
3941 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
3942 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3943 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3951 * Mark the interface down.
3953 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3954 sc_if->msk_link = 0;
3958 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
3959 * lower counter word clears the high 16 bits of the counter, so
3960 * accessing the lower 16 bits must be the last operation.
3962 #define MSK_READ_MIB32(x, y) \
3963 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \
3964 (uint32_t)GMAC_READ_2(sc, x, y)
3965 #define MSK_READ_MIB64(x, y) \
3966 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \
3967 (uint64_t)MSK_READ_MIB32(x, y)
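/*
 * Illustrative use of the macros above (not compiled): the high words
 * are read first so that the clear-on-read of the low 16 bits is the
 * last access, e.g. when accumulating the good Rx octet counter.
 */
#if 0
	uint64_t rx_octets;

	rx_octets = MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
#endif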
3970 msk_stats_clear(struct msk_if_softc *sc_if)
3972 struct msk_softc *sc;
3977 MSK_IF_LOCK_ASSERT(sc_if);
3979 sc = sc_if->msk_softc;
3980 /* Set MIB Clear Counter Mode. */
3981 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3982 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3983 /* Read all MIB Counters with Clear Mode set. */
3984 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i++)
3985 reg = MSK_READ_MIB32(sc_if->msk_port, i);
3986 /* Clear MIB Clear Counter Mode. */
3987 gmac &= ~GM_PAR_MIB_CLR;
3988 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3992 msk_stats_update(struct msk_if_softc *sc_if)
3994 struct msk_softc *sc;
3996 struct msk_hw_stats *stats;
4000 MSK_IF_LOCK_ASSERT(sc_if);
4002 ifp = sc_if->msk_ifp;
4003 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4005 sc = sc_if->msk_softc;
4006 stats = &sc_if->msk_stats;
4007 /* Set MIB Clear Counter Mode. */
4008 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4009 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4012 stats->rx_ucast_frames +=
4013 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
4014 stats->rx_bcast_frames +=
4015 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
4016 stats->rx_pause_frames +=
4017 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
4018 stats->rx_mcast_frames +=
4019 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
4020 stats->rx_crc_errs +=
4021 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
4022 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
4023 stats->rx_good_octets +=
4024 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
4025 stats->rx_bad_octets +=
4026 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
4028 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
4029 stats->rx_runt_errs +=
4030 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
4031 stats->rx_pkts_64 +=
4032 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
4033 stats->rx_pkts_65_127 +=
4034 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
4035 stats->rx_pkts_128_255 +=
4036 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4037 stats->rx_pkts_256_511 +=
4038 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4039 stats->rx_pkts_512_1023 +=
4040 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4041 stats->rx_pkts_1024_1518 +=
4042 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4043 stats->rx_pkts_1519_max +=
4044 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4045 stats->rx_pkts_too_long +=
4046 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4047 stats->rx_pkts_jabbers +=
4048 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4049 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4050 stats->rx_fifo_oflows +=
4051 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4052 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4055 stats->tx_ucast_frames +=
4056 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4057 stats->tx_bcast_frames +=
4058 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4059 stats->tx_pause_frames +=
4060 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4061 stats->tx_mcast_frames +=
4062 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4064 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4065 stats->tx_pkts_64 +=
4066 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4067 stats->tx_pkts_65_127 +=
4068 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4069 stats->tx_pkts_128_255 +=
4070 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4071 stats->tx_pkts_256_511 +=
4072 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4073 stats->tx_pkts_512_1023 +=
4074 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4075 stats->tx_pkts_1024_1518 +=
4076 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4077 stats->tx_pkts_1519_max +=
4078 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4079 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4081 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4082 stats->tx_late_colls +=
4083 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4084 stats->tx_excess_colls +=
4085 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4086 stats->tx_multi_colls +=
4087 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4088 stats->tx_single_colls +=
4089 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4090 stats->tx_underflows +=
4091 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4092 /* Clear MIB Clear Counter Mode. */
4093 gmac &= ~GM_PAR_MIB_CLR;
4094 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4098 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4100 struct msk_softc *sc;
4101 struct msk_if_softc *sc_if;
4102 uint32_t result, *stat;

        sc_if = (struct msk_if_softc *)arg1;
        sc = sc_if->msk_softc;
        off = arg2;
        stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

        MSK_IF_LOCK(sc_if);
        result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
        result += *stat;
        MSK_IF_UNLOCK(sc_if);

        return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
        struct msk_softc *sc;
        struct msk_if_softc *sc_if;
        uint64_t result, *stat;
        int off;

        sc_if = (struct msk_if_softc *)arg1;
        sc = sc_if->msk_softc;
        off = arg2;
        stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

        MSK_IF_LOCK(sc_if);
        result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
        result += *stat;
        MSK_IF_UNLOCK(sc_if);

        return (sysctl_handle_quad(oidp, &result, 0, req));
}

#undef MSK_READ_MIB32
#undef MSK_READ_MIB64
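
/*
 * Convenience wrappers for registering one read-only sysctl per MIB
 * counter: arg1 is the per-port softc and arg2 the offset of the
 * counter's software copy in struct msk_hw_stats, so the two handlers
 * above can serve every counter.
 */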
#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
        SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,   \
            sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
            "IU", d)
#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
        SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,   \
            sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
            "Q", d)

static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid_list *child, *schild;
        struct sysctl_oid *tree;

        ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
        child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
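
        /*
         * Everything below hangs off this interface's own device
         * sysctl tree (dev.msk.<unit> with the stock newbus naming),
         * so each port gets an independent "stats" subtree.
         */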
        tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "MSK Statistics");
        schild = child = SYSCTL_CHILDREN(tree);
        tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
            NULL, "MSK RX Statistics");
        child = SYSCTL_CHILDREN(tree);
        MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
            child, rx_ucast_frames, "Good unicast frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
            child, rx_bcast_frames, "Good broadcast frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
            child, rx_pause_frames, "Pause frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
            child, rx_mcast_frames, "Multicast frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
            child, rx_crc_errs, "CRC errors");
        MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
            child, rx_good_octets, "Good octets");
        MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
            child, rx_bad_octets, "Bad octets");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
            child, rx_pkts_64, "64 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
            child, rx_pkts_65_127, "65 to 127 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
            child, rx_pkts_128_255, "128 to 255 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
            child, rx_pkts_256_511, "256 to 511 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
            child, rx_pkts_512_1023, "512 to 1023 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
            child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
            child, rx_pkts_1519_max, "1519 byte to maximum size frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
            child, rx_pkts_too_long, "Frames too long");
        MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
            child, rx_pkts_jabbers, "Jabber errors");
        MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
            child, rx_fifo_oflows, "FIFO overflows");

        tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
            NULL, "MSK TX Statistics");
        child = SYSCTL_CHILDREN(tree);
        MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
            child, tx_ucast_frames, "Unicast frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
            child, tx_bcast_frames, "Broadcast frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
            child, tx_pause_frames, "Pause frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
            child, tx_mcast_frames, "Multicast frames");
        MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
            child, tx_octets, "Octets");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
            child, tx_pkts_64, "64 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
            child, tx_pkts_65_127, "65 to 127 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
            child, tx_pkts_128_255, "128 to 255 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
            child, tx_pkts_256_511, "256 to 511 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
            child, tx_pkts_512_1023, "512 to 1023 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
            child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
            child, tx_pkts_1519_max, "1519 byte to maximum size frames");
        MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
            child, tx_colls, "Collisions");
        MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
            child, tx_late_colls, "Late collisions");
        MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
            child, tx_excess_colls, "Excessive collisions");
        MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
            child, tx_multi_colls, "Multiple collisions");
        MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
            child, tx_single_colls, "Single collisions");
        MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
            child, tx_underflows, "FIFO underflows");
}
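
/*
 * Example (hypothetical unit number): once the interface is attached,
 * the counters registered above can be read from userland with
 * sysctl(8), e.g.
 *
 *      sysctl dev.msk.0.stats.rx.ucast_frames
 *      sysctl dev.msk.0.stats.tx.octets
 */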

#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
        int error, value;

        if (!arg1)
                return (EINVAL);
        value = *(int *)arg1;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error || !req->newptr)
                return (error);
        if (value < low || value > high)
                return (EINVAL);
        *(int *)arg1 = value;

        return (0);
}
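
/*
 * Validator for the driver's Rx event processing limit: it delegates
 * to sysctl_int_range() so writes outside [MSK_PROC_MIN, MSK_PROC_MAX]
 * are rejected with EINVAL.
 */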
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

        return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
            MSK_PROC_MAX));
}