2 * Copyright (c) 2017 Stormshield.
3 * Copyright (c) 2017 Semihalf.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
28 #include "opt_platform.h"
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
37 #include <sys/mutex.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/socket.h>
41 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
48 #include <net/ethernet.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp_lro.h>
62 #include <sys/sockio.h>
64 #include <machine/bus.h>
66 #include <machine/resource.h>
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
71 #include <dev/ofw/openfirm.h>
72 #include <dev/ofw/ofw_bus.h>
73 #include <dev/ofw/ofw_bus_subr.h>
75 #include <dev/mdio/mdio.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
79 #include <arm/mv/mvwin.h>
81 #include "if_mvnetareg.h"
82 #include "if_mvnetavar.h"
84 #include "miibus_if.h"
88 #define STATIC /* nothing */
93 #define DASSERT(x) KASSERT((x), (#x))
95 /* Device Register Initialization */
96 STATIC int mvneta_initreg(struct ifnet *);
98 /* Descriptor Ring Control for each of queues */
99 STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
100 STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
101 STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
102 STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
103 STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
104 STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
105 STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
106 STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
107 STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
108 STATIC int mvneta_dma_create(struct mvneta_softc *);
110 /* Rx/Tx Queue Control */
111 STATIC int mvneta_rx_queue_init(struct ifnet *, int);
112 STATIC int mvneta_tx_queue_init(struct ifnet *, int);
113 STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
114 STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
115 STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
116 STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
117 STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
118 STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
120 /* Interrupt Handlers */
121 STATIC void mvneta_disable_intr(struct mvneta_softc *);
122 STATIC void mvneta_enable_intr(struct mvneta_softc *);
123 STATIC void mvneta_rxtxth_intr(void *);
124 STATIC int mvneta_misc_intr(struct mvneta_softc *);
125 STATIC void mvneta_tick(void *);
126 /* struct ifnet and mii callbacks */
127 STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
128 STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
129 #ifdef MVNETA_MULTIQUEUE
130 STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
131 #else /* !MVNETA_MULTIQUEUE */
132 STATIC void mvneta_start(struct ifnet *);
134 STATIC void mvneta_qflush(struct ifnet *);
135 STATIC void mvneta_tx_task(void *, int);
136 STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
137 STATIC void mvneta_init(void *);
138 STATIC void mvneta_init_locked(void *);
139 STATIC void mvneta_stop(struct mvneta_softc *);
140 STATIC void mvneta_stop_locked(struct mvneta_softc *);
141 STATIC int mvneta_mediachange(struct ifnet *);
142 STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
143 STATIC void mvneta_portup(struct mvneta_softc *);
144 STATIC void mvneta_portdown(struct mvneta_softc *);
146 /* Link State Notify */
147 STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
148 STATIC int mvneta_update_media(struct mvneta_softc *, int);
149 STATIC void mvneta_adjust_link(struct mvneta_softc *);
150 STATIC void mvneta_update_eee(struct mvneta_softc *);
151 STATIC void mvneta_update_fc(struct mvneta_softc *);
152 STATIC void mvneta_link_isr(struct mvneta_softc *);
153 STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
154 STATIC void mvneta_linkup(struct mvneta_softc *);
155 STATIC void mvneta_linkdown(struct mvneta_softc *);
156 STATIC void mvneta_linkreset(struct mvneta_softc *);
159 STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
160 STATIC void mvneta_tx_set_csumflag(struct ifnet *,
161 struct mvneta_tx_desc *, struct mbuf *);
162 STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
163 STATIC void mvneta_tx_drain(struct mvneta_softc *);
166 STATIC int mvneta_rx(struct mvneta_softc *, int, int);
167 STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
168 STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
169 STATIC void mvneta_rx_set_csumflag(struct ifnet *,
170 struct mvneta_rx_desc *, struct mbuf *);
171 STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
173 /* MAC address filter */
174 STATIC void mvneta_filter_setup(struct mvneta_softc *);
177 STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
178 STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
179 STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
180 STATIC void sysctl_mvneta_init(struct mvneta_softc *);
183 STATIC void mvneta_clear_mib(struct mvneta_softc *);
184 STATIC void mvneta_update_mib(struct mvneta_softc *);
187 STATIC boolean_t mvneta_has_switch(device_t);
189 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
190 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
192 STATIC struct mtx mii_mutex;
193 STATIC int mii_init = 0;
196 STATIC int mvneta_detach(device_t);
198 STATIC int mvneta_miibus_readreg(device_t, int, int);
199 STATIC int mvneta_miibus_writereg(device_t, int, int, int);
201 static device_method_t mvneta_methods[] = {
202 /* Device interface */
203 DEVMETHOD(device_detach, mvneta_detach),
205 DEVMETHOD(miibus_readreg, mvneta_miibus_readreg),
206 DEVMETHOD(miibus_writereg, mvneta_miibus_writereg),
208 DEVMETHOD(mdio_readreg, mvneta_miibus_readreg),
209 DEVMETHOD(mdio_writereg, mvneta_miibus_writereg),
215 DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
217 DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
218 DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
219 MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
220 MODULE_DEPEND(mvneta, ether, 1, 1, 1);
221 MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
222 MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
225 * List of MIB registers and names
229 MVNETA_MIB_RX_GOOD_OCT_IDX,
230 MVNETA_MIB_RX_BAD_OCT_IDX,
231 MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
232 MVNETA_MIB_RX_GOOD_FRAME_IDX,
233 MVNETA_MIB_RX_BAD_FRAME_IDX,
234 MVNETA_MIB_RX_BCAST_FRAME_IDX,
235 MVNETA_MIB_RX_MCAST_FRAME_IDX,
236 MVNETA_MIB_RX_FRAME64_OCT_IDX,
237 MVNETA_MIB_RX_FRAME127_OCT_IDX,
238 MVNETA_MIB_RX_FRAME255_OCT_IDX,
239 MVNETA_MIB_RX_FRAME511_OCT_IDX,
240 MVNETA_MIB_RX_FRAME1023_OCT_IDX,
241 MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
242 MVNETA_MIB_TX_GOOD_OCT_IDX,
243 MVNETA_MIB_TX_GOOD_FRAME_IDX,
244 MVNETA_MIB_TX_EXCES_COL_IDX,
245 MVNETA_MIB_TX_MCAST_FRAME_IDX,
246 MVNETA_MIB_TX_BCAST_FRAME_IDX,
247 MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
248 MVNETA_MIB_FC_SENT_IDX,
249 MVNETA_MIB_FC_GOOD_IDX,
250 MVNETA_MIB_FC_BAD_IDX,
251 MVNETA_MIB_PKT_UNDERSIZE_IDX,
252 MVNETA_MIB_PKT_FRAGMENT_IDX,
253 MVNETA_MIB_PKT_OVERSIZE_IDX,
254 MVNETA_MIB_PKT_JABBER_IDX,
255 MVNETA_MIB_MAC_RX_ERR_IDX,
256 MVNETA_MIB_MAC_CRC_ERR_IDX,
257 MVNETA_MIB_MAC_COL_IDX,
258 MVNETA_MIB_MAC_LATE_COL_IDX,
261 STATIC struct mvneta_mib_def {
264 const char *sysctl_name;
266 } mvneta_mib_list[] = {
267 [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
268 "rx_good_oct", "Good Octets Rx"},
269 [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
270 "rx_bad_oct", "Bad Octets Rx"},
271 [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
272 "tx_mac_err", "MAC Transmit Error"},
273 [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
274 "rx_good_frame", "Good Frames Rx"},
275 [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
276 "rx_bad_frame", "Bad Frames Rx"},
277 [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
278 "rx_bcast_frame", "Broadcast Frames Rx"},
279 [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
280 "rx_mcast_frame", "Multicast Frames Rx"},
281 [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
282 "rx_frame_1_64", "Frame Size 1 - 64"},
283 [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
284 "rx_frame_65_127", "Frame Size 65 - 127"},
285 [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
286 "rx_frame_128_255", "Frame Size 128 - 255"},
287 [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
288 "rx_frame_256_511", "Frame Size 256 - 511"},
289 [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
290 "rx_frame_512_1023", "Frame Size 512 - 1023"},
291 [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
292 "rx_fame_1024_max", "Frame Size 1024 - Max"},
293 [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
294 "tx_good_oct", "Good Octets Tx"},
295 [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
296 "tx_good_frame", "Good Frames Tx"},
297 [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
298 "tx_exces_collision", "Excessive Collision"},
299 [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
300 "tx_mcast_frame", "Multicast Frames Tx"},
301 [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
302 "tx_bcast_frame", "Broadcast Frames Tx"},
303 [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
304 "tx_mac_ctl_err", "Unknown MAC Control"},
305 [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
306 "fc_tx", "Flow Control Tx"},
307 [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
308 "fc_rx_good", "Good Flow Control Rx"},
309 [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
310 "fc_rx_bad", "Bad Flow Control Rx"},
311 [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
312 "pkt_undersize", "Undersized Packets Rx"},
313 [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
314 "pkt_fragment", "Fragmented Packets Rx"},
315 [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
316 "pkt_oversize", "Oversized Packets Rx"},
317 [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
318 "pkt_jabber", "Jabber Packets Rx"},
319 [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
320 "mac_rx_err", "MAC Rx Errors"},
321 [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
322 "mac_crc_err", "MAC CRC Errors"},
323 [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
324 "mac_collision", "MAC Collision"},
325 [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
326 "mac_late_collision", "MAC Late Collision"},
329 static struct resource_spec res_spec[] = {
330 { SYS_RES_MEMORY, 0, RF_ACTIVE },
331 { SYS_RES_IRQ, 0, RF_ACTIVE },
336 driver_intr_t *handler;
339 { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
343 mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
348 mac_l = (addr[4] << 8) | (addr[5]);
349 mac_h = (addr[0] << 24) | (addr[1] << 16) |
350 (addr[2] << 8) | (addr[3] << 0);
352 MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
353 MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
358 mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
360 uint32_t mac_l, mac_h;
363 if (mvneta_fdt_mac_address(sc, addr) == 0)
367 * Fall back -- use the currently programmed address.
369 mac_l = MVNETA_READ(sc, MVNETA_MACAL);
370 mac_h = MVNETA_READ(sc, MVNETA_MACAH);
371 if (mac_l == 0 && mac_h == 0) {
373 * Generate pseudo-random MAC.
374 * Set lower part to random number | unit number.
376 mac_l = arc4random() & ~0xff;
377 mac_l |= device_get_unit(sc->dev) & 0xff;
378 mac_h = arc4random();
379 mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */
381 device_printf(sc->dev,
382 "Could not acquire MAC address. "
383 "Using randomized one.\n");
387 addr[0] = (mac_h & 0xff000000) >> 24;
388 addr[1] = (mac_h & 0x00ff0000) >> 16;
389 addr[2] = (mac_h & 0x0000ff00) >> 8;
390 addr[3] = (mac_h & 0x000000ff);
391 addr[4] = (mac_l & 0x0000ff00) >> 8;
392 addr[5] = (mac_l & 0x000000ff);
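/*
 * For reference, the register layout implied by the two helpers above:
 * MVNETA_MACAH holds the four most significant bytes of the MAC address
 * and MVNETA_MACAL the two least significant ones.  For example,
 * 00:50:43:12:34:56 is stored as MACAH = 0x00504312 and
 * MACAL = 0x00003456.
 */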
397 mvneta_has_switch(device_t self)
399 phandle_t node, switch_node, switch_eth, switch_eth_handle;
401 node = ofw_bus_get_node(self);
403 ofw_bus_find_compatible(OF_finddevice("/"), "marvell,dsa");
406 OF_getencprop(switch_node, "dsa,ethernet",
407 (void*)&switch_eth_handle, sizeof(switch_eth_handle));
409 if (switch_eth_handle > 0)
410 switch_eth = OF_node_from_xref(switch_eth_handle);
412 /* Return true if dsa,ethernet cell points to us */
413 return (node == switch_eth);
417 mvneta_dma_create(struct mvneta_softc *sc)
419 size_t maxsize, maxsegsz;
426 maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
428 error = bus_dma_tag_create(
429 bus_get_dma_tag(sc->dev), /* parent */
430 16, 0, /* alignment, boundary */
431 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
432 BUS_SPACE_MAXADDR, /* highaddr */
433 NULL, NULL, /* filtfunc, filtfuncarg */
434 maxsize, /* maxsize */
436 maxsegsz, /* maxsegsz */
438 NULL, NULL, /* lockfunc, lockfuncarg */
439 &sc->tx_dtag); /* dmat */
441 device_printf(sc->dev,
442 "Failed to create DMA tag for Tx descriptors.\n");
445 error = bus_dma_tag_create(
446 bus_get_dma_tag(sc->dev), /* parent */
447 1, 0, /* alignment, boundary */
448 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
449 BUS_SPACE_MAXADDR, /* highaddr */
450 NULL, NULL, /* filtfunc, filtfuncarg */
451 MVNETA_PACKET_SIZE, /* maxsize */
452 MVNETA_TX_SEGLIMIT, /* nsegments */
453 MVNETA_PACKET_SIZE, /* maxsegsz */
454 BUS_DMA_ALLOCNOW, /* flags */
455 NULL, NULL, /* lockfunc, lockfuncarg */
458 device_printf(sc->dev,
459 "Failed to create DMA tag for Tx mbufs.\n");
463 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
464 error = mvneta_ring_alloc_tx_queue(sc, q);
466 device_printf(sc->dev,
467 "Failed to allocate DMA safe memory for TxQ: %d\n", q);
475 /* Create tag for Rx descriptors */
476 error = bus_dma_tag_create(
477 bus_get_dma_tag(sc->dev), /* parent */
478 32, 0, /* alignment, boundary */
479 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
480 BUS_SPACE_MAXADDR, /* highaddr */
481 NULL, NULL, /* filtfunc, filtfuncarg */
482 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
484 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
486 NULL, NULL, /* lockfunc, lockfuncarg */
487 &sc->rx_dtag); /* dmat */
489 device_printf(sc->dev,
490 "Failed to create DMA tag for Rx descriptors.\n");
494 /* Create tag for Rx buffers */
495 error = bus_dma_tag_create(
496 bus_get_dma_tag(sc->dev), /* parent */
497 32, 0, /* alignment, boundary */
498 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
499 BUS_SPACE_MAXADDR, /* highaddr */
500 NULL, NULL, /* filtfunc, filtfuncarg */
501 MVNETA_PACKET_SIZE, 1, /* maxsize, nsegments */
502 MVNETA_PACKET_SIZE, /* maxsegsz */
504 NULL, NULL, /* lockfunc, lockfuncarg */
505 &sc->rxbuf_dtag); /* dmat */
507 device_printf(sc->dev,
508 "Failed to create DMA tag for Rx buffers.\n");
512 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
513 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
514 device_printf(sc->dev,
515 "Failed to allocate DMA safe memory for RxQ: %d\n", q);
522 mvneta_detach(sc->dev);
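/*
 * All rings follow the same busdma lifecycle, begun by the tag setup
 * above and completed by the per-queue allocators.  A condensed sketch
 * (error handling omitted):
 *
 *	bus_dma_tag_create(..., &tag);              // describe constraints
 *	bus_dmamem_alloc(tag, &va, flags, &map);    // DMA-safe memory
 *	bus_dmamap_load(tag, map, va, size,
 *	    mvneta_dmamap_cb, &pa, BUS_DMA_NOWAIT); // resolve bus address
 *
 * mvneta_dmamap_cb() just records the single segment's address, which
 * is later programmed into the descriptor queue base registers.
 */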
529 mvneta_attach(device_t self)
531 struct mvneta_softc *sc;
538 sc = device_get_softc(self);
541 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
543 error = bus_alloc_resources(self, res_spec, sc->res);
545 device_printf(self, "could not allocate resources\n");
549 sc->version = MVNETA_READ(sc, MVNETA_PV);
550 device_printf(self, "version is %x\n", sc->version);
551 callout_init(&sc->tick_ch, 0);
554 * make sure DMA engines are in reset state
556 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
557 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
560 * Disable port snoop for buffers and descriptors
561 * to avoid L2 caching of both without DRAM copy.
562 * Obtain coherency settings from the first MBUS
565 if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
566 reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
567 reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
568 reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
569 MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
575 if (mvneta_get_mac_address(sc, sc->enaddr)) {
576 device_printf(self, "no mac address.\n");
579 mvneta_set_mac_address(sc, sc->enaddr);
581 mvneta_disable_intr(sc);
583 /* Allocate network interface */
584 ifp = sc->ifp = if_alloc(IFT_ETHER);
586 device_printf(self, "if_alloc() failed\n");
590 if_initname(ifp, device_get_name(self), device_get_unit(self));
593 * We can support 802.1Q VLAN-sized frames and jumbo
596 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
599 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
600 #ifdef MVNETA_MULTIQUEUE
601 ifp->if_transmit = mvneta_transmit;
602 ifp->if_qflush = mvneta_qflush;
603 #else /* !MVNETA_MULTIQUEUE */
604 ifp->if_start = mvneta_start;
605 ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
606 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
607 IFQ_SET_READY(&ifp->if_snd);
609 ifp->if_init = mvneta_init;
610 ifp->if_ioctl = mvneta_ioctl;
613 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
615 ifp->if_capabilities |= IFCAP_HWCSUM;
618 * VLAN hardware tagging is not supported, but it is necessary
619 * for performing VLAN hardware checksums, so it is done in the
620 * driver instead.
622 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
625 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
627 ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
628 ifp->if_capenable = ifp->if_capabilities;
631 * Disabled option(s):
632 * - Support for Large Receive Offload
634 ifp->if_capabilities |= IFCAP_LRO;
636 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
639 * Device DMA Buffer allocation.
640 * Handles resource deallocation in case of failure.
642 error = mvneta_dma_create(sc);
648 /* Initialize queues */
649 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
650 error = mvneta_ring_init_tx_queue(sc, q);
657 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
658 error = mvneta_ring_init_rx_queue(sc, q);
665 ether_ifattach(ifp, sc->enaddr);
668 * Enable DMA engines and Initialize Device Registers.
670 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
671 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
672 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
674 mvneta_filter_setup(sc);
675 mvneta_sc_unlock(sc);
679 * Now the MAC is working; set up MII.
683 * The MII bus is shared by all MACs and all PHYs in the SoC;
684 * serializing the bus access should be safe.
686 mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
691 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
692 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
693 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
698 "MII attach failed, error: %d\n", error);
700 ether_ifdetach(sc->ifp);
704 sc->mii = device_get_softc(sc->miibus);
705 sc->phy_attached = 1;
707 /* Disable auto-negotiation in MAC - rely on PHY layer */
708 mvneta_update_autoneg(sc, FALSE);
709 } else if (sc->use_inband_status == TRUE) {
710 /* In-band link status */
711 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
714 /* Configure media */
715 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
717 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
718 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
720 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
721 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
723 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
724 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
726 /* Enable auto-negotiation */
727 mvneta_update_autoneg(sc, TRUE);
730 if (MVNETA_IS_LINKUP(sc))
734 mvneta_sc_unlock(sc);
737 /* Fixed-link, use predefined values */
738 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
741 ifm_target = IFM_ETHER;
742 switch (sc->phy_speed) {
744 if (sc->phy_mode != MVNETA_PHY_SGMII &&
745 sc->phy_mode != MVNETA_PHY_QSGMII) {
747 "2.5G speed can work only in (Q)SGMII mode\n");
748 ether_ifdetach(sc->ifp);
752 ifm_target |= IFM_2500_T;
755 ifm_target |= IFM_1000_T;
758 ifm_target |= IFM_100_TX;
761 ifm_target |= IFM_10_T;
764 ether_ifdetach(sc->ifp);
770 ifm_target |= IFM_FDX;
772 ifm_target |= IFM_HDX;
774 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
775 ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
776 if_link_state_change(sc->ifp, LINK_STATE_UP);
778 if (mvneta_has_switch(self)) {
779 child = device_add_child(sc->dev, "mdio", -1);
781 ether_ifdetach(sc->ifp);
785 bus_generic_attach(sc->dev);
786 bus_generic_attach(child);
789 /* Configure MAC media */
790 mvneta_update_media(sc, ifm_target);
793 sysctl_mvneta_init(sc);
795 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
797 error = bus_setup_intr(self, sc->res[1],
798 INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
801 device_printf(self, "could not setup %s\n",
802 mvneta_intrs[0].description);
803 ether_ifdetach(sc->ifp);
812 mvneta_detach(device_t dev)
814 struct mvneta_softc *sc;
818 sc = device_get_softc(dev);
822 /* Detach network interface */
826 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
827 mvneta_ring_dealloc_rx_queue(sc, q);
828 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
829 mvneta_ring_dealloc_tx_queue(sc, q);
831 if (sc->tx_dtag != NULL)
832 bus_dma_tag_destroy(sc->tx_dtag);
833 if (sc->rx_dtag != NULL)
834 bus_dma_tag_destroy(sc->rx_dtag);
835 if (sc->txmbuf_dtag != NULL)
836 bus_dma_tag_destroy(sc->txmbuf_dtag);
838 bus_release_resources(dev, res_spec, sc->res);
846 mvneta_miibus_readreg(device_t dev, int phy, int reg)
848 struct mvneta_softc *sc;
853 sc = device_get_softc(dev);
856 mtx_lock(&mii_mutex);
858 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
859 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
863 if (i == MVNETA_PHY_TIMEOUT) {
864 if_printf(ifp, "SMI busy timeout\n");
865 mtx_unlock(&mii_mutex);
869 smi = MVNETA_SMI_PHYAD(phy) |
870 MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
871 MVNETA_WRITE(sc, MVNETA_SMI, smi);
873 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
874 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
879 if (i == MVNETA_PHY_TIMEOUT) {
880 if_printf(ifp, "SMI busy timeout\n");
881 mtx_unlock(&mii_mutex);
884 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
885 smi = MVNETA_READ(sc, MVNETA_SMI);
886 if (smi & MVNETA_SMI_READVALID)
891 if (i == MVNETA_PHY_TIMEOUT) {
892 if_printf(ifp, "SMI busy timeout\n");
893 mtx_unlock(&mii_mutex);
897 mtx_unlock(&mii_mutex);
900 CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,
904 val = smi & MVNETA_SMI_DATA_MASK;
907 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
914 mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
916 struct mvneta_softc *sc;
921 sc = device_get_softc(dev);
924 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,
928 mtx_lock(&mii_mutex);
930 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
931 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
935 if (i == MVNETA_PHY_TIMEOUT) {
936 if_printf(ifp, "SMI busy timeout\n");
937 mtx_unlock(&mii_mutex);
941 smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
942 MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
943 MVNETA_WRITE(sc, MVNETA_SMI, smi);
945 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
946 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
951 mtx_unlock(&mii_mutex);
953 if (i == MVNETA_PHY_TIMEOUT)
954 if_printf(ifp, "phy write timed out\n");
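/*
 * Both SMI accessors above repeat the same bounded busy-wait on
 * MVNETA_SMI_BUSY.  The equivalent logic as a stand-alone helper (a
 * sketch only; the per-iteration delay is an assumption, as the loop
 * bodies are abridged in this listing):
 *
 *	static int
 *	mvneta_smi_wait(struct mvneta_softc *sc)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
 *			if ((MVNETA_READ(sc, MVNETA_SMI) &
 *			    MVNETA_SMI_BUSY) == 0)
 *				return (0);
 *			DELAY(1);
 *		}
 *		return (ETIMEDOUT);
 *	}
 */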
960 mvneta_portup(struct mvneta_softc *sc)
964 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
965 mvneta_rx_lockq(sc, q);
966 mvneta_rx_queue_enable(sc->ifp, q);
967 mvneta_rx_unlockq(sc, q);
970 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
971 mvneta_tx_lockq(sc, q);
972 mvneta_tx_queue_enable(sc->ifp, q);
973 mvneta_tx_unlockq(sc, q);
979 mvneta_portdown(struct mvneta_softc *sc)
981 struct mvneta_rx_ring *rx;
982 struct mvneta_tx_ring *tx;
986 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
987 rx = MVNETA_RX_RING(sc, q);
988 mvneta_rx_lockq(sc, q);
989 rx->queue_status = MVNETA_QUEUE_DISABLED;
990 mvneta_rx_unlockq(sc, q);
993 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
994 tx = MVNETA_TX_RING(sc, q);
995 mvneta_tx_lockq(sc, q);
996 tx->queue_status = MVNETA_QUEUE_DISABLED;
997 mvneta_tx_unlockq(sc, q);
1000 /* Wait for all Rx activity to terminate. */
1001 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1002 reg = MVNETA_RQC_DIS(reg);
1003 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1006 if (cnt >= RX_DISABLE_TIMEOUT) {
1008 "timeout for RX stopped. rqc 0x%x\n", reg);
1012 reg = MVNETA_READ(sc, MVNETA_RQC);
1013 } while ((reg & MVNETA_RQC_EN_MASK) != 0);
1015 /* Wait for all Tx activity to terminate. */
1016 reg = MVNETA_READ(sc, MVNETA_PIE);
1017 reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
1018 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1020 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1021 reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
1022 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1024 reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
1025 reg = MVNETA_TQC_DIS(reg);
1026 MVNETA_WRITE(sc, MVNETA_TQC, reg);
1029 if (cnt >= TX_DISABLE_TIMEOUT) {
1031 "timeout for TX stopped. tqc 0x%x\n", reg);
1035 reg = MVNETA_READ(sc, MVNETA_TQC);
1036 } while ((reg & MVNETA_TQC_EN_MASK) != 0);
1038 /* Wait until the Tx FIFO is empty */
1041 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1043 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1047 reg = MVNETA_READ(sc, MVNETA_PS0);
1048 } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
1049 ((reg & MVNETA_PS0_TXINPROG) != 0));
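/*
 * The shutdown sequence above is ordering-sensitive: the queues are
 * first marked disabled under their locks, then RQC/TQC are asked to
 * disable, and only then does the code spin until the hardware reports
 * the enable masks clear and the Tx FIFO drained.  Every wait loop is
 * bounded by a counter (RX_DISABLE_TIMEOUT, TX_DISABLE_TIMEOUT,
 * TX_FIFO_EMPTY_TIMEOUT) so that a wedged port cannot hang the kernel.
 */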
1053 * Device Register Initialization
1054 * Reset device registers to the driver's default values.
1055 * The device is not enabled here.
1058 mvneta_initreg(struct ifnet *ifp)
1060 struct mvneta_softc *sc;
1066 CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);
1069 /* Disable Legacy WRR, Disable EJP, Release from reset. */
1070 MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
1071 /* Enable mbus retry. */
1072 MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
1074 /* Init TX/RX Queue Registers */
1075 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1076 mvneta_rx_lockq(sc, q);
1077 if (mvneta_rx_queue_init(ifp, q) != 0) {
1078 device_printf(sc->dev,
1079 "initialization failed: cannot initialize queue\n");
1080 mvneta_rx_unlockq(sc, q);
1083 mvneta_rx_unlockq(sc, q);
1085 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1086 mvneta_tx_lockq(sc, q);
1087 if (mvneta_tx_queue_init(ifp, q) != 0) {
1088 device_printf(sc->dev,
1089 "initialization failed: cannot initialize queue\n");
1090 mvneta_tx_unlockq(sc, q);
1093 mvneta_tx_unlockq(sc, q);
1097 * Ethernet Unit Control - disable automatic PHY management by HW.
1098 * In case the port uses SMI-controlled PHY, poll its status with
1099 * mii_tick() and update MAC settings accordingly.
1101 reg = MVNETA_READ(sc, MVNETA_EUC);
1102 reg &= ~MVNETA_EUC_POLLING;
1103 MVNETA_WRITE(sc, MVNETA_EUC, reg);
1105 /* EEE: Low Power Idle */
1106 reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
1107 reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
1108 MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
1110 reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
1111 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
1113 reg = MVNETA_LPIC2_MUSTSET;
1114 MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
1116 /* Port MAC Control set 0 */
1117 reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */
1118 reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */
1119 reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME);
1120 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
1122 /* Port MAC Control set 2 */
1123 reg = MVNETA_READ(sc, MVNETA_PMACC2);
1124 switch (sc->phy_mode) {
1125 case MVNETA_PHY_QSGMII:
1126 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1127 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
1129 case MVNETA_PHY_SGMII:
1130 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1131 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
1133 case MVNETA_PHY_RGMII:
1134 case MVNETA_PHY_RGMII_ID:
1135 reg |= MVNETA_PMACC2_RGMIIEN;
1138 reg |= MVNETA_PMACC2_MUSTSET;
1139 reg &= ~MVNETA_PMACC2_PORTMACRESET;
1140 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
1142 /* Port Configuration Extended: enable Tx CRC generation */
1143 reg = MVNETA_READ(sc, MVNETA_PXCX);
1144 reg &= ~MVNETA_PXCX_TXCRCDIS;
1145 MVNETA_WRITE(sc, MVNETA_PXCX, reg);
1147 /* Clear MIB counter registers (clear by read) */
1148 for (i = 0; i < nitems(mvneta_mib_list); i++) {
1149 if (mvneta_mib_list[i].reg64)
1150 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
1152 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
1154 MVNETA_READ(sc, MVNETA_PDFC);
1155 MVNETA_READ(sc, MVNETA_POFC);
1157 /* Set SDC register except IPGINT bits */
1158 reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
1159 reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
1160 reg |= MVNETA_SDC_BLMR;
1161 reg |= MVNETA_SDC_BLMT;
1162 MVNETA_WRITE(sc, MVNETA_SDC, reg);
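/*
 * Since the MIB counters are clear-on-read, the reads in
 * mvneta_initreg() above discard stale hardware values so the first
 * mvneta_update_mib() tick starts from zero.  That update then
 * accumulates along these lines (a sketch; exact field names may
 * differ):
 *
 *	for (i = 0; i < nitems(mvneta_mib_list); i++)
 *		sc->sysctl_mib[i].counter += mvneta_mib_list[i].reg64 ?
 *		    MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum) :
 *		    MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
 */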
1168 mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1173 *(bus_addr_t *)arg = segs->ds_addr;
1177 mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
1179 struct mvneta_rx_ring *rx;
1180 struct mvneta_buf *rxbuf;
1184 if (q >= MVNETA_RX_QNUM_MAX)
1187 rx = MVNETA_RX_RING(sc, q);
1188 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1189 /* Allocate DMA memory for Rx descriptors */
1190 error = bus_dmamem_alloc(sc->rx_dtag,
1191 (void**)&(rx->desc),
1192 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1194 if (error != 0 || rx->desc == NULL)
1196 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1198 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
1199 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1203 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1204 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
1206 device_printf(sc->dev,
1207 "Failed to create DMA map for Rx buffer num: %d\n", i);
1210 rxbuf = &rx->rxbuf[i];
1217 mvneta_ring_dealloc_rx_queue(sc, q);
1218 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1223 mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
1225 struct mvneta_tx_ring *tx;
1228 if (q >= MVNETA_TX_QNUM_MAX)
1230 tx = MVNETA_TX_RING(sc, q);
1231 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1232 error = bus_dmamem_alloc(sc->tx_dtag,
1233 (void**)&(tx->desc),
1234 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1236 if (error != 0 || tx->desc == NULL)
1238 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1240 sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
1241 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1245 #ifdef MVNETA_MULTIQUEUE
1246 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1248 if (tx->br == NULL) {
1249 device_printf(sc->dev,
1250 "Could not setup buffer ring for TxQ(%d)\n", q);
1258 mvneta_ring_dealloc_tx_queue(sc, q);
1259 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1264 mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
1266 struct mvneta_tx_ring *tx;
1267 struct mvneta_buf *txbuf;
1272 if (q >= MVNETA_TX_QNUM_MAX)
1274 tx = MVNETA_TX_RING(sc, q);
1276 if (tx->taskq != NULL) {
1278 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1279 taskqueue_drain(tx->taskq, &tx->task);
1281 #ifdef MVNETA_MULTIQUEUE
1283 drbr_free(tx->br, M_DEVBUF);
1286 if (sc->txmbuf_dtag != NULL) {
1287 if (mtx_name(&tx->ring_mtx) != NULL) {
1289 * Maps are assumed to be loaded only after the mutex has been
1290 * initialized, so unloading them can be skipped when the mutex
1291 * was never set up.
1293 mvneta_tx_lockq(sc, q);
1294 mvneta_ring_flush_tx_queue(sc, q);
1295 mvneta_tx_unlockq(sc, q);
1297 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1298 txbuf = &tx->txbuf[i];
1299 if (txbuf->dmap != NULL) {
1300 error = bus_dmamap_destroy(sc->txmbuf_dtag,
1303 panic("%s: map busy for Tx descriptor (Q%d, %d)",
1310 if (tx->desc_pa != 0)
1311 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1313 kva = (void *)tx->desc;
1315 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1317 if (mtx_name(&tx->ring_mtx) != NULL)
1318 mtx_destroy(&tx->ring_mtx);
1320 memset(tx, 0, sizeof(*tx));
1324 mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
1326 struct mvneta_rx_ring *rx;
1327 struct lro_ctrl *lro;
1330 if (q >= MVNETA_RX_QNUM_MAX)
1333 rx = MVNETA_RX_RING(sc, q);
1335 mvneta_ring_flush_rx_queue(sc, q);
1337 if (rx->desc_pa != 0)
1338 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1340 kva = (void *)rx->desc;
1342 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1347 if (mtx_name(&rx->ring_mtx) != NULL)
1348 mtx_destroy(&rx->ring_mtx);
1350 memset(rx, 0, sizeof(*rx));
1354 mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
1356 struct mvneta_rx_ring *rx;
1357 struct lro_ctrl *lro;
1360 if (q >= MVNETA_RX_QNUM_MAX)
1363 rx = MVNETA_RX_RING(sc, q);
1364 rx->dma = rx->cpu = 0;
1365 rx->queue_th_received = MVNETA_RXTH_COUNT;
1366 rx->queue_th_time = (get_tclk() / 1000) / 10; /* 0.1 [ms] */
1368 /* Initialize LRO */
1369 rx->lro_enabled = FALSE;
1370 if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
1372 error = tcp_lro_init(lro);
1374 device_printf(sc->dev, "LRO Initialization failed!\n");
1376 rx->lro_enabled = TRUE;
1385 mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
1387 struct mvneta_tx_ring *tx;
1388 struct mvneta_buf *txbuf;
1391 if (q >= MVNETA_TX_QNUM_MAX)
1394 tx = MVNETA_TX_RING(sc, q);
1397 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1398 txbuf = &tx->txbuf[i];
1400 /* Tx handle needs a DMA map for bus_dmamap_load_mbuf_sg() */
1401 error = bus_dmamap_create(sc->txmbuf_dtag, 0,
1404 device_printf(sc->dev,
1405 "can't create dma map (tx ring %d)\n", i);
1409 tx->dma = tx->cpu = 0;
1412 tx->queue_status = MVNETA_QUEUE_DISABLED;
1413 tx->queue_hung = FALSE;
1417 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1418 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1419 taskqueue_thread_enqueue, &tx->taskq);
1420 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1421 device_get_nameunit(sc->dev), q);
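/*
 * The per-queue taskqueue created above backs the deferred-start
 * transmit pattern: when the fast path cannot take the ring lock or
 * the ring is congested, mvneta_transmit() stashes the mbuf and kicks
 * the task, and mvneta_tx_task() later drains the buf_ring under the
 * queue lock.  In outline:
 *
 *	drbr_enqueue(ifp, tx->br, m);		  // queue the packet
 *	taskqueue_enqueue(tx->taskq, &tx->task);  // schedule the drain
 */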
1427 mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
1429 struct mvneta_tx_ring *tx;
1430 struct mvneta_buf *txbuf;
1433 tx = MVNETA_TX_RING(sc, q);
1434 KASSERT_TX_MTX(sc, q);
1437 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1438 txbuf = &tx->txbuf[i];
1439 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
1440 if (txbuf->m != NULL) {
1445 tx->dma = tx->cpu = 0;
1450 mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
1452 struct mvneta_rx_ring *rx;
1453 struct mvneta_buf *rxbuf;
1456 rx = MVNETA_RX_RING(sc, q);
1457 KASSERT_RX_MTX(sc, q);
1460 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1461 rxbuf = &rx->rxbuf[i];
1462 mvneta_rx_buf_free(sc, rxbuf);
1464 rx->dma = rx->cpu = 0;
1468 * Rx/Tx Queue Control
1471 mvneta_rx_queue_init(struct ifnet *ifp, int q)
1473 struct mvneta_softc *sc;
1474 struct mvneta_rx_ring *rx;
1478 KASSERT_RX_MTX(sc, q);
1479 rx = MVNETA_RX_RING(sc, q);
1480 DASSERT(rx->desc_pa != 0);
1482 /* descriptor address */
1483 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1485 /* Rx buffer size and descriptor ring size */
1486 reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3);
1487 reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
1488 MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
1490 CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
1491 MVNETA_READ(sc, MVNETA_PRXDQS(q)));
1493 /* Rx packet offset address */
1494 reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
1495 MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
1497 CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
1498 MVNETA_READ(sc, MVNETA_PRXC(q)));
1501 /* if DMA is not working, the register is not updated */
1502 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1507 mvneta_tx_queue_init(struct ifnet *ifp, int q)
1509 struct mvneta_softc *sc;
1510 struct mvneta_tx_ring *tx;
1514 KASSERT_TX_MTX(sc, q);
1515 tx = MVNETA_TX_RING(sc, q);
1516 DASSERT(tx->desc_pa != 0);
1518 /* descriptor address */
1519 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1521 /* descriptor ring size */
1522 reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
1523 MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
1525 /* if DMA is not working, the register is not updated */
1526 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1531 mvneta_rx_queue_enable(struct ifnet *ifp, int q)
1533 struct mvneta_softc *sc;
1534 struct mvneta_rx_ring *rx;
1538 rx = MVNETA_RX_RING(sc, q);
1539 KASSERT_RX_MTX(sc, q);
1541 /* Set Rx interrupt threshold */
1542 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1543 MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
1545 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1546 MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
1548 /* Unmask RXTX_TH Intr. */
1549 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1550 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1551 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1553 /* Enable Rx queue */
1554 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1555 reg |= MVNETA_RQC_ENQ(q);
1556 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1558 rx->queue_status = MVNETA_QUEUE_WORKING;
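/*
 * The two thresholds programmed above implement Rx interrupt
 * coalescing: an interrupt fires once queue_th_received descriptors
 * are pending or once queue_th_time clock ticks elapse, whichever
 * comes first.  The time threshold is set in
 * mvneta_ring_init_rx_queue() as (get_tclk() / 1000) / 10, i.e.
 * 0.1 ms worth of core clock ticks; with a 250 MHz tclk, for example,
 * that is 25000 ticks.
 */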
1563 mvneta_tx_queue_enable(struct ifnet *ifp, int q)
1565 struct mvneta_softc *sc;
1566 struct mvneta_tx_ring *tx;
1569 tx = MVNETA_TX_RING(sc, q);
1570 KASSERT_TX_MTX(sc, q);
1572 /* Enable Tx queue */
1573 MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
1575 tx->queue_status = MVNETA_QUEUE_IDLE;
1576 tx->queue_hung = FALSE;
1580 STATIC __inline void
1581 mvneta_rx_lockq(struct mvneta_softc *sc, int q)
1585 DASSERT(q < MVNETA_RX_QNUM_MAX);
1586 mtx_lock(&sc->rx_ring[q].ring_mtx);
1589 STATIC __inline void
1590 mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
1594 DASSERT(q < MVNETA_RX_QNUM_MAX);
1595 mtx_unlock(&sc->rx_ring[q].ring_mtx);
1598 STATIC __inline int __unused
1599 mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
1603 DASSERT(q < MVNETA_TX_QNUM_MAX);
1604 return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1607 STATIC __inline void
1608 mvneta_tx_lockq(struct mvneta_softc *sc, int q)
1612 DASSERT(q < MVNETA_TX_QNUM_MAX);
1613 mtx_lock(&sc->tx_ring[q].ring_mtx);
1616 STATIC __inline void
1617 mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
1621 DASSERT(q < MVNETA_TX_QNUM_MAX);
1622 mtx_unlock(&sc->tx_ring[q].ring_mtx);
1626 * Interrupt Handlers
1629 mvneta_disable_intr(struct mvneta_softc *sc)
1632 MVNETA_WRITE(sc, MVNETA_EUIM, 0);
1633 MVNETA_WRITE(sc, MVNETA_EUIC, 0);
1634 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
1635 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
1636 MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
1637 MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
1638 MVNETA_WRITE(sc, MVNETA_PMIM, 0);
1639 MVNETA_WRITE(sc, MVNETA_PMIC, 0);
1640 MVNETA_WRITE(sc, MVNETA_PIE, 0);
1644 mvneta_enable_intr(struct mvneta_softc *sc)
1648 /* Enable Summary Bit to check all interrupt cause. */
1649 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1650 reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
1651 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1653 if (sc->use_inband_status) {
1654 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1655 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
1656 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
1659 /* Enable All Queue Interrupt */
1660 reg = MVNETA_READ(sc, MVNETA_PIE);
1661 reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
1662 reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
1663 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1667 mvneta_rxtxth_intr(void *arg)
1669 struct mvneta_softc *sc;
1671 uint32_t ic, queues;
1676 CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);
1678 ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
1681 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
1683 /* Ack maintenance interrupt first */
1684 if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
1685 sc->use_inband_status)) {
1687 mvneta_misc_intr(sc);
1688 mvneta_sc_unlock(sc);
1690 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
1692 /* RxTxTH interrupt */
1693 queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
1694 if (__predict_true(queues)) {
1696 CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
1698 /* At the moment the driver supports only one Rx queue. */
1699 DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
1700 mvneta_rx(sc, 0, 0);
1705 mvneta_misc_intr(struct mvneta_softc *sc)
1711 CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);
1716 ic = MVNETA_READ(sc, MVNETA_PMIC);
1717 ic &= MVNETA_READ(sc, MVNETA_PMIM);
1720 MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
1723 if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
1724 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
1725 mvneta_link_isr(sc);
1731 mvneta_tick(void *arg)
1733 struct mvneta_softc *sc;
1734 struct mvneta_tx_ring *tx;
1735 struct mvneta_rx_ring *rx;
1737 uint32_t fc_prev, fc_curr;
1742 * This is done before the MIB update to get the right stats
1745 mvneta_tx_drain(sc);
1747 /* Extract previous flow-control frame received counter. */
1748 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1749 /* Read mib registers (clear by read). */
1750 mvneta_update_mib(sc);
1751 /* Extract current flow-control frame received counter. */
1752 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1755 if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
1759 /* Adjust MAC settings */
1760 mvneta_adjust_link(sc);
1761 mvneta_sc_unlock(sc);
1765 * We were unable to refill the Rx queue and left the Rx function,
1766 * leaving the ring without mbufs and with no way to call refill.
1768 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1769 rx = MVNETA_RX_RING(sc, q);
1770 if (rx->needs_refill == TRUE) {
1771 mvneta_rx_lockq(sc, q);
1772 mvneta_rx_queue_refill(sc, q);
1773 mvneta_rx_unlockq(sc, q);
1779 * - check if the queue is marked as hung.
1780 * - ignore the hung status if we received pause frames,
1781 * as the hardware may have paused packet transmission.
1783 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1785 * We should take the queue lock, but as we only read the
1786 * queue status we can do it without the lock; we may
1787 * only misdetect the queue status for one tick.
1789 tx = MVNETA_TX_RING(sc, q);
1791 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1795 callout_schedule(&sc->tick_ch, hz);
1799 if_printf(sc->ifp, "watchdog timeout\n");
1802 sc->counter_watchdog++;
1803 sc->counter_watchdog_mib++;
1804 /* Trigger reinitialize sequence. */
1805 mvneta_stop_locked(sc);
1806 mvneta_init_locked(sc);
1807 mvneta_sc_unlock(sc);
1811 mvneta_qflush(struct ifnet *ifp)
1813 #ifdef MVNETA_MULTIQUEUE
1814 struct mvneta_softc *sc;
1815 struct mvneta_tx_ring *tx;
1821 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1822 tx = MVNETA_TX_RING(sc, q);
1823 mvneta_tx_lockq(sc, q);
1824 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1826 mvneta_tx_unlockq(sc, q);
1833 mvneta_tx_task(void *arg, int pending)
1835 struct mvneta_softc *sc;
1836 struct mvneta_tx_ring *tx;
1844 mvneta_tx_lockq(sc, tx->qidx);
1845 error = mvneta_xmit_locked(sc, tx->qidx);
1846 mvneta_tx_unlockq(sc, tx->qidx);
1849 if (__predict_false(error != 0 && error != ENETDOWN)) {
1850 pause("mvneta_tx_task_sleep", 1);
1851 taskqueue_enqueue(tx->taskq, &tx->task);
1856 mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
1858 struct mvneta_tx_ring *tx;
1862 KASSERT_TX_MTX(sc, q);
1863 tx = MVNETA_TX_RING(sc, q);
1868 /* Don't enqueue a packet if the queue is disabled. */
1869 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1875 /* Reclaim mbufs if above the threshold. */
1876 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1877 mvneta_tx_queue_complete(sc, q);
1879 /* Do not call transmit path if queue is already too full. */
1880 if (__predict_false(tx->used >
1881 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
1884 error = mvneta_tx_queue(sc, m, q);
1885 if (__predict_false(error != 0))
1888 /* Send a copy of the frame to the BPF listener */
1889 ETHER_BPF_MTAP(ifp, *m);
1891 /* Set watchdog on */
1892 tx->watchdog_time = ticks;
1893 tx->queue_status = MVNETA_QUEUE_WORKING;
1898 #ifdef MVNETA_MULTIQUEUE
1900 mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
1902 struct mvneta_softc *sc;
1903 struct mvneta_tx_ring *tx;
1909 /* Use the default queue if there is no flow ID, as the thread can migrate. */
1910 if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
1911 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1915 tx = MVNETA_TX_RING(sc, q);
1917 /* If the buf_ring is full, start transmitting immediately. */
1918 if (buf_ring_full(tx->br)) {
1919 mvneta_tx_lockq(sc, q);
1920 mvneta_xmit_locked(sc, q);
1921 mvneta_tx_unlockq(sc, q);
1925 * If the buf_ring is empty we will not reorder packets.
1926 * If the lock is available, transmit without using the buf_ring.
1928 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1929 error = mvneta_xmitfast_locked(sc, q, &m);
1930 mvneta_tx_unlockq(sc, q);
1931 if (__predict_true(error == 0))
1934 /* Transmit can fail in fastpath. */
1935 if (__predict_false(m == NULL))
1939 /* Enqueue then schedule taskqueue. */
1940 error = drbr_enqueue(ifp, tx->br, m);
1941 if (__predict_false(error != 0))
1944 taskqueue_enqueue(tx->taskq, &tx->task);
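/*
 * Queue selection above keys on the mbuf's flow ID so that a given
 * flow always maps to the same Tx queue, preserving per-flow ordering.
 * For example, if MVNETA_TX_QNUM_MAX were 8 (the value is defined in
 * the header), a flowid of 0x2b would select queue 0x2b % 8 == 3;
 * unhashed packets fall back to a default queue.
 */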
1949 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1952 struct mvneta_tx_ring *tx;
1956 KASSERT_TX_MTX(sc, q);
1958 tx = MVNETA_TX_RING(sc, q);
1961 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1962 error = mvneta_xmitfast_locked(sc, q, &m);
1963 if (__predict_false(error != 0)) {
1965 drbr_putback(ifp, tx->br, m);
1967 drbr_advance(ifp, tx->br);
1970 drbr_advance(ifp, tx->br);
1975 #else /* !MVNETA_MULTIQUEUE */
1977 mvneta_start(struct ifnet *ifp)
1979 struct mvneta_softc *sc;
1980 struct mvneta_tx_ring *tx;
1984 tx = MVNETA_TX_RING(sc, 0);
1986 mvneta_tx_lockq(sc, 0);
1987 error = mvneta_xmit_locked(sc, 0);
1988 mvneta_tx_unlockq(sc, 0);
1989 /* Handle retransmit in the background taskq. */
1990 if (__predict_false(error != 0 && error != ENETDOWN))
1991 taskqueue_enqueue(tx->taskq, &tx->task);
1995 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1998 struct mvneta_tx_ring *tx;
2002 KASSERT_TX_MTX(sc, q);
2004 tx = MVNETA_TX_RING(sc, 0);
2007 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2008 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
2012 error = mvneta_xmitfast_locked(sc, q, &m);
2013 if (__predict_false(error != 0)) {
2015 IFQ_DRV_PREPEND(&ifp->if_snd, m);
2025 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2027 struct mvneta_softc *sc;
2028 struct mvneta_rx_ring *rx;
2036 ifr = (struct ifreq *)data;
2040 if (ifp->if_flags & IFF_UP) {
2041 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2042 flags = ifp->if_flags ^ sc->mvneta_if_flags;
2045 sc->mvneta_if_flags = ifp->if_flags;
2047 if ((flags & IFF_PROMISC) != 0)
2048 mvneta_filter_setup(sc);
2050 mvneta_init_locked(sc);
2051 sc->mvneta_if_flags = ifp->if_flags;
2052 if (sc->phy_attached)
2053 mii_mediachg(sc->mii);
2054 mvneta_sc_unlock(sc);
2057 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2058 mvneta_stop_locked(sc);
2060 sc->mvneta_if_flags = ifp->if_flags;
2061 mvneta_sc_unlock(sc);
2064 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU &&
2065 ifr->ifr_reqcap & IFCAP_TXCSUM)
2066 ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2067 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2068 if (mask & IFCAP_HWCSUM) {
2069 ifp->if_capenable &= ~IFCAP_HWCSUM;
2070 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
2071 if (ifp->if_capenable & IFCAP_TXCSUM)
2072 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2075 ifp->if_hwassist = 0;
2077 if (mask & IFCAP_LRO) {
2079 ifp->if_capenable ^= IFCAP_LRO;
2080 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2081 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2082 rx = MVNETA_RX_RING(sc, q);
2083 rx->lro_enabled = !rx->lro_enabled;
2086 mvneta_sc_unlock(sc);
2088 VLAN_CAPABILITIES(ifp);
2091 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2092 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2093 (ifr->ifr_media & IFM_FDX) == 0) {
2094 device_printf(sc->dev,
2095 "%s half-duplex unsupported\n",
2096 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2102 case SIOCGIFMEDIA: /* FALLTHROUGH */
2104 if (!sc->phy_attached)
2105 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2108 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2112 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2113 MVNETA_ETHER_SIZE) {
2116 ifp->if_mtu = ifr->ifr_mtu;
2118 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) {
2119 ifp->if_capenable &= ~IFCAP_TXCSUM;
2120 ifp->if_hwassist = 0;
2122 ifp->if_capenable |= IFCAP_TXCSUM;
2123 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2127 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2128 /* Trigger reinitialize sequence */
2129 mvneta_stop_locked(sc);
2130 mvneta_init_locked(sc);
2132 mvneta_sc_unlock(sc);
2137 error = ether_ioctl(ifp, cmd, data);
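/*
 * Worth noting from the SIOCSIFMTU path above: hardware Tx checksum
 * offload is only usable up to MVNETA_MAX_CSUM_MTU, so raising the MTU
 * beyond that clears IFCAP_TXCSUM and if_hwassist, and lowering it
 * back re-enables them.  The accepted MTU range is 68 up to
 * MVNETA_MAX_FRAME - MVNETA_ETHER_SIZE.
 */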
2145 mvneta_init_locked(void *arg)
2147 struct mvneta_softc *sc;
2155 if (!device_is_attached(sc->dev) ||
2156 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2159 mvneta_disable_intr(sc);
2160 callout_stop(&sc->tick_ch);
2162 /* Get the latest MAC address */
2163 bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
2164 mvneta_set_mac_address(sc, sc->enaddr);
2165 mvneta_filter_setup(sc);
2167 /* Start DMA Engine */
2168 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
2169 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
2170 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
2173 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2174 reg |= MVNETA_PMACC0_PORTEN;
2175 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2177 /* Allow access to each TXQ/RXQ from both CPU's */
2178 for (cpu = 0; cpu < mp_ncpus; ++cpu)
2179 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
2180 MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
2182 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2183 mvneta_rx_lockq(sc, q);
2184 mvneta_rx_queue_refill(sc, q);
2185 mvneta_rx_unlockq(sc, q);
2188 if (!sc->phy_attached)
2191 /* Enable interrupt */
2192 mvneta_enable_intr(sc);
2195 callout_schedule(&sc->tick_ch, hz);
2197 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2201 mvneta_init(void *arg)
2203 struct mvneta_softc *sc;
2207 mvneta_init_locked(sc);
2208 if (sc->phy_attached)
2209 mii_mediachg(sc->mii);
2210 mvneta_sc_unlock(sc);
2215 mvneta_stop_locked(struct mvneta_softc *sc)
2218 struct mvneta_rx_ring *rx;
2219 struct mvneta_tx_ring *tx;
2224 if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2227 mvneta_disable_intr(sc);
2229 callout_stop(&sc->tick_ch);
2231 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2234 if (sc->linkup == TRUE)
2235 mvneta_linkdown(sc);
2237 /* Reset the MAC Port Enable bit */
2238 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2239 reg &= ~MVNETA_PMACC0_PORTEN;
2240 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2242 /* Disable each queue */
2243 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2244 rx = MVNETA_RX_RING(sc, q);
2246 mvneta_rx_lockq(sc, q);
2247 mvneta_ring_flush_rx_queue(sc, q);
2248 mvneta_rx_unlockq(sc, q);
2252 * Hold Reset state of DMA Engine
2253 * (must write 0x0 to restart it)
2255 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
2256 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
2258 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2259 tx = MVNETA_TX_RING(sc, q);
2261 mvneta_tx_lockq(sc, q);
2262 mvneta_ring_flush_tx_queue(sc, q);
2263 mvneta_tx_unlockq(sc, q);
2268 mvneta_stop(struct mvneta_softc *sc)
2272 mvneta_stop_locked(sc);
2273 mvneta_sc_unlock(sc);
2277 mvneta_mediachange(struct ifnet *ifp)
2279 struct mvneta_softc *sc;
2283 if (!sc->phy_attached && !sc->use_inband_status) {
2284 /* We shouldn't be here */
2285 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2289 if (sc->use_inband_status) {
2290 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2297 mii_mediachg(sc->mii);
2299 mvneta_sc_unlock(sc);
2305 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
2309 psr = MVNETA_READ(sc, MVNETA_PSR);
2312 if (psr & MVNETA_PSR_GMIISPEED)
2313 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2314 else if (psr & MVNETA_PSR_MIISPEED)
2315 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2316 else if (psr & MVNETA_PSR_LINKUP)
2317 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2320 if (psr & MVNETA_PSR_FULLDX)
2321 ifmr->ifm_active |= IFM_FDX;
2324 ifmr->ifm_status = IFM_AVALID;
2325 if (psr & MVNETA_PSR_LINKUP)
2326 ifmr->ifm_status |= IFM_ACTIVE;
2330 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2332 struct mvneta_softc *sc;
2333 struct mii_data *mii;
2337 if (!sc->phy_attached && !sc->use_inband_status) {
2338 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2344 if (sc->use_inband_status) {
2345 mvneta_get_media(sc, ifmr);
2346 mvneta_sc_unlock(sc);
2353 ifmr->ifm_active = mii->mii_media_active;
2354 ifmr->ifm_status = mii->mii_media_status;
2356 mvneta_sc_unlock(sc);
2363 mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
2368 reg = MVNETA_READ(sc, MVNETA_PANC);
2369 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2370 MVNETA_PANC_ANFCEN);
2371 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2372 MVNETA_PANC_INBANDANEN;
2373 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2375 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2376 reg |= MVNETA_PMACC2_INBANDANMODE;
2377 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2379 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2380 reg |= MVNETA_PSOMSCD_ENABLE;
2381 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2383 reg = MVNETA_READ(sc, MVNETA_PANC);
2384 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2385 MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2386 MVNETA_PANC_INBANDANEN);
2387 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2389 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2390 reg &= ~MVNETA_PMACC2_INBANDANMODE;
2391 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2393 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2394 reg &= ~MVNETA_PSOMSCD_ENABLE;
2395 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2400 mvneta_update_media(struct mvneta_softc *sc, int media)
2409 mvneta_linkreset(sc);
2411 running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
2413 mvneta_stop_locked(sc);
2415 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2417 if (sc->use_inband_status)
2418 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
2420 mvneta_update_eee(sc);
2421 mvneta_update_fc(sc);
2423 if (IFM_SUBTYPE(media) != IFM_AUTO) {
2424 reg = MVNETA_READ(sc, MVNETA_PANC);
2425 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2426 MVNETA_PANC_SETMIISPEED |
2427 MVNETA_PANC_SETFULLDX);
2428 if (IFM_SUBTYPE(media) == IFM_1000_T ||
2429 IFM_SUBTYPE(media) == IFM_2500_T) {
2430 if ((media & IFM_FDX) == 0) {
2431 device_printf(sc->dev,
2432 "%s half-duplex unsupported\n",
2433 IFM_SUBTYPE(media) == IFM_1000_T ?
2439 reg |= MVNETA_PANC_SETGMIISPEED;
2440 } else if (IFM_SUBTYPE(media) == IFM_100_TX)
2441 reg |= MVNETA_PANC_SETMIISPEED;
2443 if (media & IFM_FDX)
2444 reg |= MVNETA_PANC_SETFULLDX;
2446 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2450 mvneta_init_locked(sc);
2451 mvneta_sc_unlock(sc);
2456 mvneta_adjust_link(struct mvneta_softc *sc)
2458 boolean_t phy_linkup;
2462 mvneta_update_eee(sc);
2463 mvneta_update_fc(sc);
2465 /* Check for link change */
2466 phy_linkup = (sc->mii->mii_media_status &
2467 (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
2469 if (sc->linkup != phy_linkup)
2470 mvneta_linkupdate(sc, phy_linkup);
2472 /* Don't update media on disabled link */
2476 /* Check for media type change */
2477 if (sc->mvneta_media != sc->mii->mii_media_active) {
2478 sc->mvneta_media = sc->mii->mii_media_active;
2480 reg = MVNETA_READ(sc, MVNETA_PANC);
2481 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2482 MVNETA_PANC_SETMIISPEED |
2483 MVNETA_PANC_SETFULLDX);
2484 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2485 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2486 reg |= MVNETA_PANC_SETGMIISPEED;
2487 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2488 reg |= MVNETA_PANC_SETMIISPEED;
2490 if (sc->mvneta_media & IFM_FDX)
2491 reg |= MVNETA_PANC_SETFULLDX;
2493 MVNETA_WRITE(sc, MVNETA_PANC, reg);
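/*
 * Link interrupt handler: re-read the in-band link state and, when it
 * differs from the cached state, bring the link up or down accordingly.
 */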
2498 mvneta_link_isr(struct mvneta_softc *sc)
2504 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2505 if (sc->linkup == linkup)
2511 mvneta_linkdown(sc);
	    "%s: link %s\n", device_get_nameunit(sc->dev), linkup ? "up" : "down");
2520 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2528 mvneta_linkdown(sc);
	    "%s: link %s\n", device_get_nameunit(sc->dev), linkup ? "up" : "down");
2537 mvneta_update_eee(struct mvneta_softc *sc)
2543 /* set EEE parameters */
2544 reg = MVNETA_READ(sc, MVNETA_LPIC1);
2546 reg |= MVNETA_LPIC1_LPIRE;
2548 reg &= ~MVNETA_LPIC1_LPIRE;
2549 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
2553 mvneta_update_fc(struct mvneta_softc *sc)
2559 reg = MVNETA_READ(sc, MVNETA_PANC);
2561 /* Flow control negotiation */
2562 reg |= MVNETA_PANC_PAUSEADV;
2563 reg |= MVNETA_PANC_ANFCEN;
2565 /* Disable flow control negotiation */
2566 reg &= ~MVNETA_PANC_PAUSEADV;
2567 reg &= ~MVNETA_PANC_ANFCEN;
2570 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2574 mvneta_linkup(struct mvneta_softc *sc)
2580 if (!sc->use_inband_status) {
2581 reg = MVNETA_READ(sc, MVNETA_PANC);
2582 reg |= MVNETA_PANC_FORCELINKPASS;
2583 reg &= ~MVNETA_PANC_FORCELINKFAIL;
2584 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2587 mvneta_qflush(sc->ifp);
2590 if_link_state_change(sc->ifp, LINK_STATE_UP);
2594 mvneta_linkdown(struct mvneta_softc *sc)
2600 if (!sc->use_inband_status) {
2601 reg = MVNETA_READ(sc, MVNETA_PANC);
2602 reg &= ~MVNETA_PANC_FORCELINKPASS;
2603 reg |= MVNETA_PANC_FORCELINKFAIL;
2604 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2607 mvneta_portdown(sc);
2608 mvneta_qflush(sc->ifp);
2610 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2614 mvneta_linkreset(struct mvneta_softc *sc)
2616 struct mii_softc *mii;
2618 if (sc->phy_attached) {
2619 /* Force reset PHY */
2620 mii = LIST_FIRST(&sc->mii->mii_phys);
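/*
 * Enqueue one mbuf chain on TX queue q: VLAN-encapsulate it in software
 * if needed, load it with the dmamap of its first descriptor slot,
 * spread it over up to MVNETA_TX_SEGLIMIT descriptors (first marked F,
 * last marked L) and announce the new descriptors through PTXSU.
 */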
2630 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2633 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2634 struct mbuf *mtmp, *mbuf;
2635 struct mvneta_tx_ring *tx;
2636 struct mvneta_buf *txbuf;
2637 struct mvneta_tx_desc *t;
2639 int start, used, error, i, txnsegs;
2642 tx = MVNETA_TX_RING(sc, q);
2643 DASSERT(tx->used >= 0);
2644 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2648 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2649 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2655 mbuf->m_flags &= ~M_VLANTAG;
2659 if (__predict_false(mbuf->m_next != NULL &&
2660 (mbuf->m_pkthdr.csum_flags &
2661 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2662 if (M_WRITABLE(mbuf) == 0) {
2663 mtmp = m_dup(mbuf, M_NOWAIT);
2670 *mbufp = mbuf = mtmp;
2674 /* load mbuf using dmamap of 1st descriptor */
2675 txbuf = &tx->txbuf[tx->cpu];
2676 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2677 txbuf->dmap, mbuf, txsegs, &txnsegs,
2679 if (__predict_false(error != 0)) {
		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d",
		    ifp->if_xname, q, error);
		/* Apart from EFBIG, ENOMEM is the only recoverable error. */
2684 if (error != ENOMEM) {
2693 if (__predict_false(txnsegs <= 0
2694 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
		/* not enough free descriptors, or the mbuf is broken */
2697 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2698 ifp->if_xname, q, txnsegs);
2700 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2703 DASSERT(txbuf->m == NULL);
2705 /* remember mbuf using 1st descriptor */
2707 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2708 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2710 /* load to tx descriptors */
2713 for (i = 0; i < txnsegs; i++) {
2714 t = &tx->desc[tx->cpu];
2718 if (__predict_true(i == 0)) {
2719 /* 1st descriptor */
2720 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2721 t->command |= MVNETA_TX_CMD_F;
2722 mvneta_tx_set_csumflag(ifp, t, mbuf);
2724 t->bufptr_pa = txsegs[i].ds_addr;
2725 t->bytecnt = txsegs[i].ds_len;
2726 tx->cpu = tx_counter_adv(tx->cpu, 1);
2731 /* t is last descriptor here */
2733 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2735 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2736 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
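	/*
	 * The PTXSU written-descriptor count is an 8-bit field, so a batch
	 * larger than 255 must be reported in several writes, e.g.
	 * used == 600 is announced as 255 + 255 + 90.
	 */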
2738 while (__predict_false(used > 255)) {
2739 ptxsu = MVNETA_PTXSU_NOWD(255);
2740 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2743 if (__predict_true(used > 0)) {
2744 ptxsu = MVNETA_PTXSU_NOWD(used);
2745 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
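/*
 * Fill in the checksum-offload bits of the first TX descriptor.  The
 * hardware wants the L3 offset and the IP header length encoded in the
 * command word; for a plain IPv4/TCP frame, for instance, ipoff is
 * ETHER_HDR_LEN (14) and iphl is typically 20 bytes.
 */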
2751 mvneta_tx_set_csumflag(struct ifnet *ifp,
2752 struct mvneta_tx_desc *t, struct mbuf *m)
2754 struct ether_header *eh;
2756 uint32_t iphl, ipoff;
2760 csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
2761 eh = mtod(m, struct ether_header *);
2762 switch (ntohs(eh->ether_type)) {
2764 ipoff = ETHER_HDR_LEN;
2766 case ETHERTYPE_IPV6:
2768 case ETHERTYPE_VLAN:
2769 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2773 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2774 ip = (struct ip *)(m->m_data + ipoff);
2775 iphl = ip->ip_hl<<2;
2776 t->command |= MVNETA_TX_CMD_L3_IP4;
2778 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2784 if (csum_flags & CSUM_IP) {
2785 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2789 if (csum_flags & CSUM_IP_TCP) {
2790 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2791 t->command |= MVNETA_TX_CMD_L4_TCP;
2792 } else if (csum_flags & CSUM_IP_UDP) {
2793 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2794 t->command |= MVNETA_TX_CMD_L4_UDP;
2796 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2799 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2800 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
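/*
 * Reclaim completed TX descriptors: read the transmitted-buffer count
 * from PTXS, unload and free the mbuf hanging off the first descriptor
 * of each frame, and return the buffers to the hardware through the
 * PTXSU released-buffer field (again at most 255 per write).
 */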
2804 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2806 struct mvneta_tx_ring *tx;
2807 struct mvneta_buf *txbuf;
2808 struct mvneta_tx_desc *t;
2809 uint32_t ptxs, ptxsu, ndesc;
2812 KASSERT_TX_MTX(sc, q);
2814 tx = MVNETA_TX_RING(sc, q);
2815 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2818 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2819 ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2821 if (__predict_false(ndesc == 0)) {
2823 tx->queue_status = MVNETA_QUEUE_IDLE;
2824 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2825 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2826 tx->queue_hung = TRUE;
2831 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2832 sc->ifp->if_xname, q, ndesc);
2835 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2836 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2838 for (i = 0; i < ndesc; i++) {
2839 t = &tx->desc[tx->dma];
2841 if (t->flags & MVNETA_TX_F_ES)
2842 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2843 sc->ifp->if_xname, q, tx->dma);
2845 txbuf = &tx->txbuf[tx->dma];
2846 if (__predict_true(txbuf->m != NULL)) {
2847 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2848 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
			DASSERT((t->command & MVNETA_TX_CMD_F) == 0);
2854 tx->dma = tx_counter_adv(tx->dma, 1);
2857 DASSERT(tx->used >= 0);
2858 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2859 while (__predict_false(ndesc > 255)) {
2860 ptxsu = MVNETA_PTXSU_NORB(255);
2861 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2864 if (__predict_true(ndesc > 0)) {
2865 ptxsu = MVNETA_PTXSU_NORB(ndesc);
2866 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2869 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2870 sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
2873 tx->watchdog_time = ticks;
2876 tx->queue_status = MVNETA_QUEUE_IDLE;
2880 * Do a final TX complete when TX is idle.
2883 mvneta_tx_drain(struct mvneta_softc *sc)
2885 struct mvneta_tx_ring *tx;
2889 * Handle trailing mbuf on TX queue.
 * The check is done locklessly to avoid contention on the TX path.
2892 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2893 tx = MVNETA_TX_RING(sc, q);
2894 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2896 mvneta_tx_lockq(sc, q);
2897 mvneta_tx_queue_complete(sc, q);
2898 mvneta_tx_unlockq(sc, q);
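/*
 * RX entry point: read the occupied-descriptor count of queue q from
 * PRXS and process up to 'count' packets; a 'count' of zero or less
 * means no budget limit.
 */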
2907 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2909 uint32_t prxs, npkt;
2913 mvneta_rx_lockq(sc, q);
2914 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2915 npkt = MVNETA_PRXS_GET_ODC(prxs);
2916 if (__predict_false(npkt == 0))
2919 if (count > 0 && npkt > count) {
2923 mvneta_rx_queue(sc, q, npkt);
2925 mvneta_rx_unlockq(sc, q);
 * Helper routine for updating the PRXSU register of a given queue.
 * Handles counts of processed descriptors larger than the maximum
 * the 8-bit register field accepts (255) by issuing several writes.
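 * E.g. processed == 300 is reported as 255 + 45.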
2933 STATIC __inline void
2934 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2938 while (__predict_false(processed > 255)) {
2939 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2940 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2943 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2944 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2947 static __inline void
2948 mvneta_prefetch(void *p)
2951 __builtin_prefetch(p);
2955 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
2958 struct mvneta_rx_ring *rx;
2959 struct mvneta_rx_desc *r;
2960 struct mvneta_buf *rxbuf;
2962 struct lro_ctrl *lro;
2963 struct lro_entry *queued;
2965 int i, pktlen, processed, ndma;
2967 KASSERT_RX_MTX(sc, q);
2970 rx = MVNETA_RX_RING(sc, q);
2973 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
2976 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
2977 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2979 for (i = 0; i < npkt; i++) {
2980 /* Prefetch next desc, rxbuf. */
2981 ndma = rx_counter_adv(rx->dma, 1);
2982 mvneta_prefetch(&rx->desc[ndma]);
2983 mvneta_prefetch(&rx->rxbuf[ndma]);
2985 /* get descriptor and packet */
2986 r = &rx->desc[rx->dma];
2987 rxbuf = &rx->rxbuf[rx->dma];
2991 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
2992 BUS_DMASYNC_POSTREAD);
2993 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
2994 /* Prefetch mbuf header. */
		/*
		 * Drop descriptors with an error status, and frames that
		 * do not fit in a single buffer.
		 */
2999 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3000 (MVNETA_RX_F|MVNETA_RX_L));
3001 if (__predict_false((r->status & MVNETA_RX_ES) ||
3002 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3003 (MVNETA_RX_F|MVNETA_RX_L)))
3007 * [ OFF | MH | PKT | CRC ]
		 * bytecnt covers MH, PKT and CRC.
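		 * E.g. bytecnt == 124 gives
		 * pktlen == 124 - 2 (MH) - 4 (CRC) == 118.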
3010 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3011 pktbuf = (uint8_t *)r->bufptr_va + MVNETA_PACKET_OFFSET +
3012 MVNETA_HWHEADER_SIZE;
3014 /* Prefetch mbuf data. */
3015 mvneta_prefetch(pktbuf);
3017 /* Write value to mbuf (avoid read). */
3019 m->m_len = m->m_pkthdr.len = pktlen;
3020 m->m_pkthdr.rcvif = ifp;
3021 mvneta_rx_set_csumflag(ifp, r, m);
3023 /* Increase rx_dma before releasing the lock. */
3026 if (__predict_false(rx->lro_enabled &&
3027 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3028 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3029 (m->m_pkthdr.csum_flags &
3030 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3031 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3032 if (rx->lro.lro_cnt != 0) {
3033 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3038 mvneta_rx_unlockq(sc, q);
3039 (*ifp->if_input)(ifp, m);
3040 mvneta_rx_lockq(sc, q);
3042 * Check whether this queue has been disabled in the
3043 * meantime. If yes, then clear LRO and exit.
			if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3048 /* Refresh receive ring to avoid stall and minimize jitter. */
3049 if (processed >= MVNETA_RX_REFILL_COUNT) {
3050 mvneta_prxsu_update(sc, q, processed);
3051 mvneta_rx_queue_refill(sc, q);
3058 /* Refresh receive ring to avoid stall and minimize jitter. */
3059 if (processed >= MVNETA_RX_REFILL_COUNT) {
3060 mvneta_prxsu_update(sc, q, processed);
3061 mvneta_rx_queue_refill(sc, q);
3066 CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
3068 /* DMA status update */
3069 mvneta_prxsu_update(sc, q, processed);
	/* Refill the remaining buffers, if any */
3071 mvneta_rx_queue_refill(sc, q);
3075 * Flush any outstanding LRO work
3078 while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
		LIST_REMOVE(queued, next);
3080 tcp_lro_flush(lro, queued);
3085 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3088 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
	/* m_freem() frees the whole mbuf chain at once */
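/*
 * Refill empty RX descriptors with fresh mbuf clusters.  PRXS tells us
 * how many descriptors the hardware still owns (occupied plus
 * non-occupied); the remainder of the ring is re-armed and then
 * announced through the PRXSU new-descriptor field, again in chunks of
 * at most 255.
 */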
3094 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3096 struct mvneta_rx_ring *rx;
3097 struct mvneta_rx_desc *r;
3098 struct mvneta_buf *rxbuf;
3099 bus_dma_segment_t segs;
3101 uint32_t prxs, prxsu, ndesc;
3102 int npkt, refill, nsegs, error;
3104 KASSERT_RX_MTX(sc, q);
3106 rx = MVNETA_RX_RING(sc, q);
3107 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3108 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3109 refill = MVNETA_RX_RING_CNT - ndesc;
3111 CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
3114 if (__predict_false(refill <= 0))
3117 for (npkt = 0; npkt < refill; npkt++) {
3118 rxbuf = &rx->rxbuf[rx->cpu];
3119 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3120 if (__predict_false(m == NULL)) {
3124 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3126 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3127 m, &segs, &nsegs, BUS_DMA_NOWAIT);
3128 if (__predict_false(error != 0 || nsegs != 1)) {
			KASSERT(0, ("Failed to load Rx mbuf DMA map"));
3134 /* Add the packet to the ring */
3136 r = &rx->desc[rx->cpu];
3137 r->bufptr_pa = segs.ds_addr;
3138 r->bufptr_va = (uint32_t)m->m_data;
3140 rx->cpu = rx_counter_adv(rx->cpu, 1);
3143 if (refill == MVNETA_RX_RING_CNT)
3144 rx->needs_refill = TRUE;
3148 rx->needs_refill = FALSE;
	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3151 while (__predict_false(npkt > 255)) {
3152 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3153 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3156 if (__predict_true(npkt > 0)) {
3157 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3158 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
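/*
 * Translate the RX descriptor status bits into mbuf checksum flags:
 * a verified IPv4 header yields CSUM_L3_CALC|CSUM_L3_VALID, and a good
 * TCP/UDP checksum additionally sets CSUM_L4_VALID with a csum_data
 * of 0xffff.
 */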
3162 STATIC __inline void
3163 mvneta_rx_set_csumflag(struct ifnet *ifp,
3164 struct mvneta_rx_desc *r, struct mbuf *m)
3166 uint32_t csum_flags;
3169 if (__predict_false((r->status &
3170 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
		return; /* not an IP packet */
3174 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3175 MVNETA_RX_IP_HEADER_OK))
3176 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3178 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3179 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3181 switch (r->status & MVNETA_RX_L4_MASK) {
3182 case MVNETA_RX_L4_TCP:
3183 case MVNETA_RX_L4_UDP:
3184 csum_flags |= CSUM_L4_CALC;
3185 if (__predict_true((r->status &
3186 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3187 csum_flags |= CSUM_L4_VALID;
3188 m->m_pkthdr.csum_data = htons(0xffff);
3191 case MVNETA_RX_L4_OTH:
3196 m->m_pkthdr.csum_flags = csum_flags;
3200 * MAC address filter
3203 mvneta_filter_setup(struct mvneta_softc *sc)
3206 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3212 memset(dfut, 0, sizeof(dfut));
3213 memset(dfsmt, 0, sizeof(dfsmt));
3214 memset(dfomt, 0, sizeof(dfomt));
3217 ifp->if_flags |= IFF_ALLMULTI;
3218 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
3219 for (i = 0; i < MVNETA_NDFSMT; i++) {
3220 dfsmt[i] = dfomt[i] =
3221 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3222 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3223 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3224 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3228 pxc = MVNETA_READ(sc, MVNETA_PXC);
3229 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3230 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3231 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3232 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3233 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3234 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3235 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3236 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3237 if (ifp->if_flags & IFF_BROADCAST) {
3238 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3240 if (ifp->if_flags & IFF_PROMISC) {
3241 pxc |= MVNETA_PXC_UPM;
3243 MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3245 /* Set Destination Address Filter Unicast Table */
3246 if (ifp->if_flags & IFF_PROMISC) {
3247 /* pass all unicast addresses */
3248 for (i = 0; i < MVNETA_NDFUT; i++) {
3250 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3251 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3252 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3253 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
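		/*
		 * The unicast table holds 16 nibble-indexed entries spread
		 * over MVNETA_NDFUT registers, four entries each; e.g. a
		 * MAC address ending in 0x37 selects nibble 7, i.e. entry 3
		 * of dfut[1].
		 */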
3256 i = sc->enaddr[5] & 0xf; /* last nibble */
3257 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3259 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3261 /* Set Destination Address Filter Multicast Tables */
3262 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3263 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
3270 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3272 struct mvneta_sysctl_mib *arg;
3273 struct mvneta_softc *sc;
3276 arg = (struct mvneta_sysctl_mib *)arg1;
	if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
3288 mvneta_sc_unlock(sc);
3289 return sysctl_handle_64(oidp, &val, 0, req);
3294 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3296 struct mvneta_softc *sc;
3300 sc = (struct mvneta_softc *)arg1;
3304 err = sysctl_handle_int(oidp, &val, 0, req);
3308 if (val < 0 || val > 1)
3313 mvneta_clear_mib(sc);
3314 mvneta_sc_unlock(sc);
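/*
 * Sysctl handler for the per-queue RX interrupt threshold timer.  The
 * value is exposed in microseconds and converted to TCLK cycles as
 * time_mvtclk = tclk * time_us / 10^6; with a 250 MHz TCLK, for
 * example, 100 us corresponds to 25000 cycles.
 */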
3321 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3323 struct mvneta_sysctl_queue *arg;
3324 struct mvneta_rx_ring *rx;
3325 struct mvneta_softc *sc;
3326 uint32_t reg, time_mvtclk;
3330 arg = (struct mvneta_sysctl_queue *)arg1;
	if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
3335 if (arg->rxtx != MVNETA_SYSCTL_RX)
	/* read the current queue threshold time */
3344 mvneta_rx_lockq(sc, arg->queue);
3345 rx = MVNETA_RX_RING(sc, arg->queue);
3346 time_mvtclk = rx->queue_th_time;
3347 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / get_tclk();
3348 mvneta_rx_unlockq(sc, arg->queue);
3349 mvneta_sc_unlock(sc);
3351 err = sysctl_handle_int(oidp, &time_us, 0, req);
3356 mvneta_rx_lockq(sc, arg->queue);
	/* update the threshold time (valid range: 0 - 1 sec) */
3359 if (time_us < 0 || time_us > (1000 * 1000)) {
3360 mvneta_rx_unlockq(sc, arg->queue);
3361 mvneta_sc_unlock(sc);
3365 (uint64_t)get_tclk() * (uint64_t)time_us / (1000ULL * 1000ULL);
3366 rx->queue_th_time = time_mvtclk;
3367 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3368 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3369 mvneta_rx_unlockq(sc, arg->queue);
3370 mvneta_sc_unlock(sc);
3376 sysctl_mvneta_init(struct mvneta_softc *sc)
3378 struct sysctl_ctx_list *ctx;
3379 struct sysctl_oid_list *children;
3380 struct sysctl_oid_list *rxchildren;
3381 struct sysctl_oid_list *qchildren, *mchildren;
3382 struct sysctl_oid *tree;
3384 struct mvneta_sysctl_queue *rxarg;
3385 #define MVNETA_SYSCTL_NAME(num) "queue" # num
3386 static const char *sysctl_queue_names[] = {
3387 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3388 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3389 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3390 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3392 #undef MVNETA_SYSCTL_NAME
3394 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3395 static const char *sysctl_queue_descrs[] = {
3396 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3397 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3398 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3399 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3401 #undef MVNETA_SYSCTL_DESCR
3404 ctx = device_get_sysctl_ctx(sc->dev);
3405 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3407 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3408 CTLFLAG_RD, 0, "NETA RX");
3409 rxchildren = SYSCTL_CHILDREN(tree);
3410 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3411 CTLFLAG_RD, 0, "NETA MIB");
3412 mchildren = SYSCTL_CHILDREN(tree);
3415 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3416 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3417 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3418 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3423 /* dev.mvneta.[unit].mib.<mibs> */
3424 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3425 const char *name = mvneta_mib_list[i].sysctl_name;
3426 const char *desc = mvneta_mib_list[i].desc;
3427 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3431 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, name,
3432 CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
		    sysctl_read_mib, "QU", desc);
3435 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3436 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3437 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3438 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3439 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3440 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3442 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3443 CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
3444 sysctl_clear_mib, "I", "Reset MIB counters");
3446 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3447 rxarg = &sc->sysctl_rx_queue[q];
3451 rxarg->rxtx = MVNETA_SYSCTL_RX;
		/* dev.mvneta.[unit].rx.[queue] */
3454 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3455 sysctl_queue_names[q], CTLFLAG_RD, 0,
3456 sysctl_queue_descrs[q]);
3457 qchildren = SYSCTL_CHILDREN(tree);
		/* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
3460 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3461 CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
		    sysctl_set_queue_rxthtime, "IU",
3463 "interrupt coalescing threshold timer [us]");
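/*
 * Reset all MIB statistics.  The hardware counters are clear-on-read,
 * so reading each register discards its current value, after which the
 * accumulated software copies are zeroed as well.
 */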
3471 mvneta_clear_mib(struct mvneta_softc *sc)
3477 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3478 if (mvneta_mib_list[i].reg64)
3479 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3481 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3482 sc->sysctl_mib[i].counter = 0;
3484 MVNETA_READ(sc, MVNETA_PDFC);
3485 sc->counter_pdfc = 0;
3486 MVNETA_READ(sc, MVNETA_POFC);
3487 sc->counter_pofc = 0;
3488 sc->counter_watchdog = 0;
3492 mvneta_update_mib(struct mvneta_softc *sc)
3494 struct mvneta_tx_ring *tx;
3499 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3501 if (mvneta_mib_list[i].reg64)
3502 val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3504 val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3509 sc->sysctl_mib[i].counter += val;
3510 switch (mvneta_mib_list[i].regnum) {
3511 case MVNETA_MIB_RX_GOOD_OCT:
3512 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3514 case MVNETA_MIB_RX_BAD_FRAME:
3515 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3517 case MVNETA_MIB_RX_GOOD_FRAME:
3518 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3520 case MVNETA_MIB_RX_MCAST_FRAME:
3521 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3523 case MVNETA_MIB_TX_GOOD_OCT:
3524 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3526 case MVNETA_MIB_TX_GOOD_FRAME:
3527 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3529 case MVNETA_MIB_TX_MCAST_FRAME:
3530 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3532 case MVNETA_MIB_MAC_COL:
3533 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3535 case MVNETA_MIB_TX_MAC_TRNS_ERR:
3536 case MVNETA_MIB_TX_EXCES_COL:
3537 case MVNETA_MIB_MAC_LATE_COL:
3538 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3543 reg = MVNETA_READ(sc, MVNETA_PDFC);
3544 sc->counter_pdfc += reg;
3545 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3546 reg = MVNETA_READ(sc, MVNETA_POFC);
3547 sc->counter_pofc += reg;
3548 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3551 if (sc->counter_watchdog_mib > 0) {
3552 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3553 sc->counter_watchdog_mib = 0;
	 * We do not take the queue locks, so as not to disrupt the TX path.
	 * At worst we miss one drv_error increment, which the next MIB
	 * update will pick up. We may also clear the counter while the TX
	 * path is incrementing it, but since we only clear it when it is
	 * non-zero, at most one error can be lost.
3563 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3564 tx = MVNETA_TX_RING(sc, i);
3566 if (tx->drv_error > 0) {
3567 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);