/*-
 * Copyright (c) 2017 Stormshield.
 * Copyright (c) 2017 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp_lro.h>

#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mdio/mdio.h>

#include <arm/mv/mvvar.h>

#if !defined(__aarch64__)
#include <arm/mv/mvreg.h>
#include <arm/mv/mvwin.h>
#endif

#include "if_mvnetareg.h"
#include "if_mvnetavar.h"

#include "miibus_if.h"

#define	STATIC /* nothing */

#define	DASSERT(x) KASSERT((x), (#x))

#define	A3700_TCLK_250MHZ	250000000

/* Device Register Initialization */
STATIC int mvneta_initreg(struct ifnet *);

/* Descriptor Ring Control for each queue */
STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
STATIC int mvneta_dma_create(struct mvneta_softc *);

/* Rx/Tx Queue Control */
STATIC int mvneta_rx_queue_init(struct ifnet *, int);
STATIC int mvneta_tx_queue_init(struct ifnet *, int);
STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);

/* Interrupt Handlers */
STATIC void mvneta_disable_intr(struct mvneta_softc *);
STATIC void mvneta_enable_intr(struct mvneta_softc *);
STATIC void mvneta_rxtxth_intr(void *);
STATIC int mvneta_misc_intr(struct mvneta_softc *);
STATIC void mvneta_tick(void *);

/* struct ifnet and mii callbacks */
STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
#ifdef MVNETA_MULTIQUEUE
STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
#else /* !MVNETA_MULTIQUEUE */
STATIC void mvneta_start(struct ifnet *);
#endif
STATIC void mvneta_qflush(struct ifnet *);
STATIC void mvneta_tx_task(void *, int);
STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
STATIC void mvneta_init(void *);
STATIC void mvneta_init_locked(void *);
STATIC void mvneta_stop(struct mvneta_softc *);
STATIC void mvneta_stop_locked(struct mvneta_softc *);
STATIC int mvneta_mediachange(struct ifnet *);
STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
STATIC void mvneta_portup(struct mvneta_softc *);
STATIC void mvneta_portdown(struct mvneta_softc *);

/* Link State Notify */
STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
STATIC int mvneta_update_media(struct mvneta_softc *, int);
STATIC void mvneta_adjust_link(struct mvneta_softc *);
STATIC void mvneta_update_eee(struct mvneta_softc *);
STATIC void mvneta_update_fc(struct mvneta_softc *);
STATIC void mvneta_link_isr(struct mvneta_softc *);
STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
STATIC void mvneta_linkup(struct mvneta_softc *);
STATIC void mvneta_linkdown(struct mvneta_softc *);
STATIC void mvneta_linkreset(struct mvneta_softc *);

STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
STATIC void mvneta_tx_set_csumflag(struct ifnet *,
    struct mvneta_tx_desc *, struct mbuf *);
STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
STATIC void mvneta_tx_drain(struct mvneta_softc *);

STATIC int mvneta_rx(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
STATIC void mvneta_rx_set_csumflag(struct ifnet *,
    struct mvneta_rx_desc *, struct mbuf *);
STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);

/* MAC address filter */
STATIC void mvneta_filter_setup(struct mvneta_softc *);

STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
STATIC void sysctl_mvneta_init(struct mvneta_softc *);

STATIC void mvneta_clear_mib(struct mvneta_softc *);
STATIC void mvneta_update_mib(struct mvneta_softc *);

STATIC boolean_t mvneta_find_ethernet_prop_switch(phandle_t, phandle_t);
STATIC boolean_t mvneta_has_switch(device_t);

#define	mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
#define	mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)

STATIC struct mtx mii_mutex;
STATIC int mii_init = 0;

STATIC int mvneta_detach(device_t);

STATIC int mvneta_miibus_readreg(device_t, int, int);
STATIC int mvneta_miibus_writereg(device_t, int, int, int);

STATIC uint32_t mvneta_get_clk(void);

static device_method_t mvneta_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	mvneta_detach),
	DEVMETHOD(miibus_readreg,	mvneta_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mvneta_miibus_writereg),
	DEVMETHOD(mdio_readreg,		mvneta_miibus_readreg),
	DEVMETHOD(mdio_writereg,	mvneta_miibus_writereg),

	DEVMETHOD_END
};

DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));

DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
MODULE_DEPEND(mvneta, ether, 1, 1, 1);
MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);

/*
 * List of MIB registers and names.
 */
	MVNETA_MIB_RX_GOOD_OCT_IDX,
	MVNETA_MIB_RX_BAD_OCT_IDX,
	MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
	MVNETA_MIB_RX_GOOD_FRAME_IDX,
	MVNETA_MIB_RX_BAD_FRAME_IDX,
	MVNETA_MIB_RX_BCAST_FRAME_IDX,
	MVNETA_MIB_RX_MCAST_FRAME_IDX,
	MVNETA_MIB_RX_FRAME64_OCT_IDX,
	MVNETA_MIB_RX_FRAME127_OCT_IDX,
	MVNETA_MIB_RX_FRAME255_OCT_IDX,
	MVNETA_MIB_RX_FRAME511_OCT_IDX,
	MVNETA_MIB_RX_FRAME1023_OCT_IDX,
	MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
	MVNETA_MIB_TX_GOOD_OCT_IDX,
	MVNETA_MIB_TX_GOOD_FRAME_IDX,
	MVNETA_MIB_TX_EXCES_COL_IDX,
	MVNETA_MIB_TX_MCAST_FRAME_IDX,
	MVNETA_MIB_TX_BCAST_FRAME_IDX,
	MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
	MVNETA_MIB_FC_SENT_IDX,
	MVNETA_MIB_FC_GOOD_IDX,
	MVNETA_MIB_FC_BAD_IDX,
	MVNETA_MIB_PKT_UNDERSIZE_IDX,
	MVNETA_MIB_PKT_FRAGMENT_IDX,
	MVNETA_MIB_PKT_OVERSIZE_IDX,
	MVNETA_MIB_PKT_JABBER_IDX,
	MVNETA_MIB_MAC_RX_ERR_IDX,
	MVNETA_MIB_MAC_CRC_ERR_IDX,
	MVNETA_MIB_MAC_COL_IDX,
	MVNETA_MIB_MAC_LATE_COL_IDX,

STATIC struct mvneta_mib_def {
	const char *sysctl_name;
} mvneta_mib_list[] = {
	[MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
	    "rx_good_oct", "Good Octets Rx"},
	[MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
	    "rx_bad_oct", "Bad Octets Rx"},
	[MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
	    "tx_mac_err", "MAC Transmit Error"},
	[MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
	    "rx_good_frame", "Good Frames Rx"},
	[MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
	    "rx_bad_frame", "Bad Frames Rx"},
	[MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
	    "rx_bcast_frame", "Broadcast Frames Rx"},
	[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
	    "rx_mcast_frame", "Multicast Frames Rx"},
	[MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
	    "rx_frame_1_64", "Frame Size 1 - 64"},
	[MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
	    "rx_frame_65_127", "Frame Size 65 - 127"},
	[MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
	    "rx_frame_128_255", "Frame Size 128 - 255"},
	[MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
	    "rx_frame_256_511", "Frame Size 256 - 511"},
	[MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
	    "rx_frame_512_1023", "Frame Size 512 - 1023"},
	[MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
	    "rx_frame_1024_max", "Frame Size 1024 - Max"},
	[MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
	    "tx_good_oct", "Good Octets Tx"},
	[MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
	    "tx_good_frame", "Good Frames Tx"},
	[MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
	    "tx_exces_collision", "Excessive Collision"},
	[MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
	    "tx_mcast_frame", "Multicast Frames Tx"},
	[MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
	    "tx_bcast_frame", "Broadcast Frames Tx"},
	[MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
	    "tx_mac_ctl_err", "Unknown MAC Control"},
	[MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
	    "fc_tx", "Flow Control Tx"},
	[MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
	    "fc_rx_good", "Good Flow Control Rx"},
	[MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
	    "fc_rx_bad", "Bad Flow Control Rx"},
	[MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
	    "pkt_undersize", "Undersized Packets Rx"},
	[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
	    "pkt_fragment", "Fragmented Packets Rx"},
	[MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
	    "pkt_oversize", "Oversized Packets Rx"},
	[MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
	    "pkt_jabber", "Jabber Packets Rx"},
	[MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
	    "mac_rx_err", "MAC Rx Errors"},
	[MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
	    "mac_crc_err", "MAC CRC Errors"},
	[MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
	    "mac_collision", "MAC Collision"},
	[MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
	    "mac_late_collision", "MAC Late Collision"},
};
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	const char *description;
} mvneta_intrs[] = {
	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
};

#if defined(__aarch64__)
	return (A3700_TCLK_250MHZ);
#endif

mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)

	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) |
	    (addr[2] << 8) | (addr[3] << 0);

	MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
	MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
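	/*
	 * Example (hypothetical address 00:50:43:12:34:56): the shifts
	 * above yield MVNETA_MACAH = 0x00504312 (bytes 0-3) and
	 * MVNETA_MACAL = 0x3456 (bytes 4-5).
	 */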
mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)

	uint32_t mac_l, mac_h;

	if (mvneta_fdt_mac_address(sc, addr) == 0)

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
	if (mac_l == 0 && mac_h == 0) {
		/*
		 * Generate pseudo-random MAC.
		 * Set lower part to random number | unit number.
		 */
		mac_l = arc4random() & ~0xff;
		mac_l |= device_get_unit(sc->dev) & 0xff;
		mac_h = arc4random();
		mac_h &= ~(3 << 24);	/* Clear multicast and LAA bits */

		device_printf(sc->dev,
		    "Could not acquire MAC address. "
		    "Using randomized one.\n");
	}

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);

mvneta_find_ethernet_prop_switch(phandle_t ethernet, phandle_t node)

	phandle_t child, switch_eth_handle, switch_eth;

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if (OF_getencprop(child, "ethernet", (void*)&switch_eth_handle,
		    sizeof(switch_eth_handle)) > 0) {
			if (switch_eth_handle > 0) {
				switch_eth = OF_node_from_xref(
				    switch_eth_handle);

				if (switch_eth == ethernet)
			}
		}

		ret = mvneta_find_ethernet_prop_switch(ethernet, child);
	}

mvneta_has_switch(device_t self)

	node = ofw_bus_get_node(self);

	return (mvneta_find_ethernet_prop_switch(node, OF_finddevice("/")));

mvneta_dma_create(struct mvneta_softc *sc)

	size_t maxsize, maxsegsz;

	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    maxsize,				/* maxsize */
	    maxsegsz,				/* maxsegsz */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tx_dtag);			/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx descriptors.\n");

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MVNETA_PACKET_SIZE,			/* maxsize */
	    MVNETA_TX_SEGLIMIT,			/* nsegments */
	    MVNETA_PACKET_SIZE,			/* maxsegsz */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx mbufs.\n");

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_alloc_tx_queue(sc, q);
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
	}

	/* Create tag for Rx descriptors */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    32, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->rx_dtag);			/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx descriptors.\n");

	/* Create tag for Rx buffers */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    32, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MVNETA_PACKET_SIZE, 1,		/* maxsize, nsegments */
	    MVNETA_PACKET_SIZE,			/* maxsegsz */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->rxbuf_dtag);			/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx buffers.\n");

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
		}
	}

	mvneta_detach(sc->dev);

mvneta_attach(device_t self)

	struct mvneta_softc *sc;
#if !defined(__aarch64__)
	uint32_t reg;
#endif

	sc = device_get_softc(self);

	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);

	error = bus_alloc_resources(self, res_spec, sc->res);
		device_printf(self, "could not allocate resources\n");

	sc->version = MVNETA_READ(sc, MVNETA_PV);
	device_printf(self, "version is %x\n", sc->version);
	callout_init(&sc->tick_ch, 0);

	/*
	 * Make sure DMA engines are in reset state.
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);

#if !defined(__aarch64__)
	/*
	 * Disable port snoop for buffers and descriptors
	 * to avoid L2 caching of both without DRAM copy.
	 * Obtain coherency settings from the first MBUS
	 * window.
	 */
	if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
		reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
		reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
		reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
		MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
	}
#endif

	if (mvneta_get_mac_address(sc, sc->enaddr)) {
		device_printf(self, "no mac address.\n");
	}
	mvneta_set_mac_address(sc, sc->enaddr);

	mvneta_disable_intr(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
		device_printf(self, "if_alloc() failed\n");

	if_initname(ifp, device_get_name(self), device_get_unit(self));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * frames.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef MVNETA_MULTIQUEUE
	ifp->if_transmit = mvneta_transmit;
	ifp->if_qflush = mvneta_qflush;
#else /* !MVNETA_MULTIQUEUE */
	ifp->if_start = mvneta_start;
	ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
#endif
	ifp->if_init = mvneta_init;
	ifp->if_ioctl = mvneta_ioctl;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM;

	/*
	 * VLAN hardware tagging is not supported by the controller, but
	 * it is required for VLAN hardware checksums, so the tagging is
	 * done in the driver.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;

	/*
	 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
	 */
	ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disabled option(s):
	 * - Support for Large Receive Offload
	 */
	ifp->if_capabilities |= IFCAP_LRO;

	ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;

	/*
	 * Device DMA Buffer allocation.
	 * Handles resource deallocation in case of failure.
	 */
	error = mvneta_dma_create(sc);

	/* Initialize queues */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_init_tx_queue(sc, q);
	}

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		error = mvneta_ring_init_rx_queue(sc, q);
	}

	ether_ifattach(ifp, sc->enaddr);

	/*
	 * Enable DMA engines and Initialize Device Registers.
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);

	mvneta_filter_setup(sc);
	mvneta_sc_unlock(sc);

	/*
	 * Now the MAC is working, set up MII.
	 */
		/*
		 * The MII bus is shared by all MACs and all PHYs in the
		 * SoC; serializing the bus access should be safe.
		 */
		mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);

	if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
		error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
		    mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
			    "MII attach failed, error: %d\n", error);
			ether_ifdetach(sc->ifp);

		sc->mii = device_get_softc(sc->miibus);
		sc->phy_attached = 1;

		/* Disable auto-negotiation in MAC - rely on PHY layer */
		mvneta_update_autoneg(sc, FALSE);
	} else if (sc->use_inband_status == TRUE) {
		/* In-band link status */
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,

		/* Configure media */
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);

		/* Enable auto-negotiation */
		mvneta_update_autoneg(sc, TRUE);

		if (MVNETA_IS_LINKUP(sc))
		mvneta_sc_unlock(sc);

		/* Fixed-link, use predefined values */
		mvneta_update_autoneg(sc, FALSE);
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,

		ifm_target = IFM_ETHER;
		switch (sc->phy_speed) {
		case 2500:
			if (sc->phy_mode != MVNETA_PHY_SGMII &&
			    sc->phy_mode != MVNETA_PHY_QSGMII) {
				device_printf(self,
				    "2.5G speed can work only in (Q)SGMII mode\n");
				ether_ifdetach(sc->ifp);
			}
			ifm_target |= IFM_2500_T;
			break;
		case 1000:
			ifm_target |= IFM_1000_T;
			break;
		case 100:
			ifm_target |= IFM_100_TX;
			break;
		case 10:
			ifm_target |= IFM_10_T;
			break;
		default:
			ether_ifdetach(sc->ifp);
		}

			ifm_target |= IFM_FDX;
		else
			ifm_target |= IFM_HDX;

		ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
		if_link_state_change(sc->ifp, LINK_STATE_UP);

		if (mvneta_has_switch(self)) {
			device_printf(self, "This device is attached to a switch\n");
			child = device_add_child(sc->dev, "mdio", -1);
				ether_ifdetach(sc->ifp);
			bus_generic_attach(sc->dev);
			bus_generic_attach(child);
		}

		/* Configure MAC media */
		mvneta_update_media(sc, ifm_target);

	sysctl_mvneta_init(sc);

	callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);

	error = bus_setup_intr(self, sc->res[1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
		device_printf(self, "could not setup %s\n",
		    mvneta_intrs[0].description);
		ether_ifdetach(sc->ifp);

mvneta_detach(device_t dev)

	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	/* Detach network interface */

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
		mvneta_ring_dealloc_rx_queue(sc, q);
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
		mvneta_ring_dealloc_tx_queue(sc, q);

	if (sc->tx_dtag != NULL)
		bus_dma_tag_destroy(sc->tx_dtag);
	if (sc->rx_dtag != NULL)
		bus_dma_tag_destroy(sc->rx_dtag);
	if (sc->txmbuf_dtag != NULL)
		bus_dma_tag_destroy(sc->txmbuf_dtag);

	bus_release_resources(dev, res_spec, sc->res);
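/*
 * MII access goes through the shared SMI register, serialized by
 * mii_mutex: wait for the BUSY bit to clear, post the read opcode,
 * wait for BUSY to clear again, then poll READVALID before taking the
 * data field out of the register.
 */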
mvneta_miibus_readreg(device_t dev, int phy, int reg)

	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
	}

	smi = MVNETA_SMI_PHYAD(phy) |
	    MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
	}

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		smi = MVNETA_READ(sc, MVNETA_SMI);
		if (smi & MVNETA_SMI_READVALID)
			break;
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
	}

	mtx_unlock(&mii_mutex);

	CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,

	val = smi & MVNETA_SMI_DATA_MASK;

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,

mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)

	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
	}

	smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
	    MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
	}

	mtx_unlock(&mii_mutex);

	if (i == MVNETA_PHY_TIMEOUT)
		if_printf(ifp, "phy write timed out\n");
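/*
 * Port up/down sequencing: queues are enabled or disabled one at a
 * time under their ring locks; the down path additionally polls the
 * hardware until Rx/Tx DMA has quiesced and the Tx FIFO has drained
 * before the caller may reset the port.
 */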
mvneta_portup(struct mvneta_softc *sc)

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_enable(sc->ifp, q);
		mvneta_rx_unlockq(sc, q);
	}

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		mvneta_tx_queue_enable(sc->ifp, q);
		mvneta_tx_unlockq(sc, q);
	}

mvneta_portdown(struct mvneta_softc *sc)

	struct mvneta_rx_ring *rx;
	struct mvneta_tx_ring *tx;

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		mvneta_rx_lockq(sc, q);
		rx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_rx_unlockq(sc, q);
	}

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		tx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_tx_unlockq(sc, q);
	}

	/* Wait for all Rx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg = MVNETA_RQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			device_printf(sc->dev,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while ((reg & MVNETA_RQC_EN_MASK) != 0);

	/* Wait for all Tx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);

	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
	reg = MVNETA_TQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_TQC, reg);
	do {
		if (cnt >= TX_DISABLE_TIMEOUT) {
			device_printf(sc->dev,
			    "timeout for TX stopped. tqc 0x%x\n", reg);
			break;
		}
		reg = MVNETA_READ(sc, MVNETA_TQC);
	} while ((reg & MVNETA_TQC_EN_MASK) != 0);

	/* Wait until the Tx FIFO is empty. */
	do {
		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
			device_printf(sc->dev,
			    "timeout for TX FIFO drained. ps0 0x%x\n", reg);
			break;
		}
		reg = MVNETA_READ(sc, MVNETA_PS0);
	} while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
	    ((reg & MVNETA_PS0_TXINPROG) != 0));

/*
 * Device Register Initialization
 * Reset device registers to the device driver defaults.
 * The device is not enabled here.
 */
mvneta_initreg(struct ifnet *ifp)

	struct mvneta_softc *sc;

	CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);

	/* Disable Legacy WRR, Disable EJP, Release from reset. */
	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
	/* Enable mbus retry. */
	MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);

	/* Init TX/RX Queue Registers */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		if (mvneta_rx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_rx_unlockq(sc, q);
		}
		mvneta_rx_unlockq(sc, q);
	}
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		if (mvneta_tx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_tx_unlockq(sc, q);
		}
		mvneta_tx_unlockq(sc, q);
	}

	/*
	 * Ethernet Unit Control - disable automatic PHY management by HW.
	 * In case the port uses SMI-controlled PHY, poll its status with
	 * mii_tick() and update MAC settings accordingly.
	 */
	reg = MVNETA_READ(sc, MVNETA_EUC);
	reg &= ~MVNETA_EUC_POLLING;
	MVNETA_WRITE(sc, MVNETA_EUC, reg);

	/* EEE: Low Power Idle */
	reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
	reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
	MVNETA_WRITE(sc, MVNETA_LPIC0, reg);

	reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);

	reg = MVNETA_LPIC2_MUSTSET;
	MVNETA_WRITE(sc, MVNETA_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVNETA_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVNETA_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Port MAC Control set 2 */
	reg = MVNETA_READ(sc, MVNETA_PMACC2);
	switch (sc->phy_mode) {
	case MVNETA_PHY_QSGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
		break;
	case MVNETA_PHY_SGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
		break;
	case MVNETA_PHY_RGMII:
	case MVNETA_PHY_RGMII_ID:
		reg |= MVNETA_PMACC2_RGMIIEN;
		break;
	}
	reg |= MVNETA_PMACC2_MUSTSET;
	reg &= ~MVNETA_PMACC2_PORTMACRESET;
	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVNETA_READ(sc, MVNETA_PXCX);
	reg &= ~MVNETA_PXCX_TXCRCDIS;
	MVNETA_WRITE(sc, MVNETA_PXCX, reg);

	/* Clear MIB counter registers (cleared by read). */
	for (i = 0; i < nitems(mvneta_mib_list); i++) {
		if (mvneta_mib_list[i].reg64)
			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
		else
			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
	}
	MVNETA_READ(sc, MVNETA_PDFC);
	MVNETA_READ(sc, MVNETA_POFC);

	/* Set SDC register except IPGINT bits */
	reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_BLMR;
	reg |= MVNETA_SDC_BLMT;
	MVNETA_WRITE(sc, MVNETA_SDC, reg);
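/*
 * bus_dmamap_load() reports the resolved segment list through this
 * callback; the descriptor rings are allocated as one contiguous
 * segment, so only the bus address of segs[0] needs to be recorded
 * into the caller-supplied bus_addr_t.
 */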
mvneta_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)

	*(bus_addr_t *)arg = segs->ds_addr;

mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);
	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
	/* Allocate DMA memory for Rx descriptors */
	error = bus_dmamem_alloc(sc->rx_dtag,
	    (void**)&(rx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &rx->desc_map);
	if (error != 0 || rx->desc == NULL)

	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
	    rx->desc,
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
			device_printf(sc->dev,
			    "Failed to create DMA map for Rx buffer num: %d\n", i);
		rxbuf = &rx->rxbuf[i];
	}

	mvneta_ring_dealloc_rx_queue(sc, q);
	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");

mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_tx_ring *tx;

	if (q >= MVNETA_TX_QNUM_MAX)

	tx = MVNETA_TX_RING(sc, q);
	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
	error = bus_dmamem_alloc(sc->tx_dtag,
	    (void**)&(tx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &tx->desc_map);
	if (error != 0 || tx->desc == NULL)

	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
	    tx->desc,
	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);

#ifdef MVNETA_MULTIQUEUE
	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
	    &tx->ring_mtx);
	if (tx->br == NULL) {
		device_printf(sc->dev,
		    "Could not setup buffer ring for TxQ(%d)\n", q);
	}
#endif

	mvneta_ring_dealloc_tx_queue(sc, q);
	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");

mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	if (q >= MVNETA_TX_QNUM_MAX)

	tx = MVNETA_TX_RING(sc, q);

	if (tx->taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
			taskqueue_drain(tx->taskq, &tx->task);
	}
#ifdef MVNETA_MULTIQUEUE
	if (tx->br != NULL)
		drbr_free(tx->br, M_DEVBUF);
#endif

	if (sc->txmbuf_dtag != NULL) {
		if (mtx_name(&tx->ring_mtx) != NULL) {
			/*
			 * Maps are loaded only after the mutex has been
			 * initialized, so if it was never set up there
			 * are no maps to unload.
			 */
			mvneta_tx_lockq(sc, q);
			mvneta_ring_flush_tx_queue(sc, q);
			mvneta_tx_unlockq(sc, q);
		}
		for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
			txbuf = &tx->txbuf[i];
			if (txbuf->dmap != NULL) {
				error = bus_dmamap_destroy(sc->txmbuf_dtag,
				    txbuf->dmap);
					panic("%s: map busy for Tx descriptor (Q%d, %d)",
			}
		}
	}

	if (tx->desc_pa != 0)
		bus_dmamap_unload(sc->tx_dtag, tx->desc_map);

	kva = (void *)tx->desc;
	bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);

	if (mtx_name(&tx->ring_mtx) != NULL)
		mtx_destroy(&tx->ring_mtx);

	memset(tx, 0, sizeof(*tx));

mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);

	mvneta_ring_flush_rx_queue(sc, q);

	if (rx->desc_pa != 0)
		bus_dmamap_unload(sc->rx_dtag, rx->desc_map);

	kva = (void *)rx->desc;
	bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);

	if (mtx_name(&rx->ring_mtx) != NULL)
		mtx_destroy(&rx->ring_mtx);

	memset(rx, 0, sizeof(*rx));

mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);
	rx->dma = rx->cpu = 0;
	rx->queue_th_received = MVNETA_RXTH_COUNT;
	rx->queue_th_time = (mvneta_get_clk() / 1000) / 10; /* 0.1 [ms] */

	/* Initialize LRO */
	rx->lro_enabled = FALSE;
	if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
		error = tcp_lro_init(lro);
		if (error != 0)
			device_printf(sc->dev, "LRO Initialization failed!\n");
		else
			rx->lro_enabled = TRUE;
	}

mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	if (q >= MVNETA_TX_QNUM_MAX)

	tx = MVNETA_TX_RING(sc, q);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];

		/* Tx handle needs DMA map for bus_dmamap_load_mbuf_sg() */
		error = bus_dmamap_create(sc->txmbuf_dtag, 0,
		    &txbuf->dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "can't create dma map (tx ring %d)\n", i);
		}
	}
	tx->dma = tx->cpu = 0;

	tx->queue_status = MVNETA_QUEUE_DISABLED;
	tx->queue_hung = FALSE;

	TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
	tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &tx->taskq);
	taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
	    device_get_nameunit(sc->dev), q);
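	/*
	 * The per-queue taskqueue backs the deferred transmit path:
	 * when the fast path cannot make progress, mvneta_tx_task()
	 * re-runs mvneta_xmit_locked() from thread context (see the
	 * retry logic in mvneta_tx_task() and mvneta_start()).
	 */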
mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		if (txbuf->m != NULL) {
			m_freem(txbuf->m);
		}
	}
	tx->dma = tx->cpu = 0;

mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)

	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;

	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxbuf = &rx->rxbuf[i];
		mvneta_rx_buf_free(sc, rxbuf);
	}
	rx->dma = rx->cpu = 0;

/*
 * Rx/Tx Queue Control
 */
mvneta_rx_queue_init(struct ifnet *ifp, int q)

	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;

	KASSERT_RX_MTX(sc, q);
	rx = MVNETA_RX_RING(sc, q);
	DASSERT(rx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);

	/* Rx buffer size and descriptor ring size */
	reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3);
	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXC(q)));

	/* If DMA is not working, the register is not updated. */
	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);

mvneta_tx_queue_init(struct ifnet *ifp, int q)

	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);

	/* descriptor ring size */
	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);

	/* If DMA is not working, the register is not updated. */
	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);

mvneta_rx_queue_enable(struct ifnet *ifp, int q)

	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;

	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);

	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg |= MVNETA_RQC_ENQ(q);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);

	rx->queue_status = MVNETA_QUEUE_WORKING;

mvneta_tx_queue_enable(struct ifnet *ifp, int q)

	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Enable Tx queue */
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));

	tx->queue_status = MVNETA_QUEUE_IDLE;
	tx->queue_hung = FALSE;

STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)

	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_lock(&sc->rx_ring[q].ring_mtx);

STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)

	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_unlock(&sc->rx_ring[q].ring_mtx);

STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)

	DASSERT(q < MVNETA_TX_QNUM_MAX);
	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));

STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)

	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_lock(&sc->tx_ring[q].ring_mtx);

STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)

	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_unlock(&sc->tx_ring[q].ring_mtx);

/*
 * Interrupt Handlers
 */
mvneta_disable_intr(struct mvneta_softc *sc)

	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
	MVNETA_WRITE(sc, MVNETA_PIE, 0);

mvneta_enable_intr(struct mvneta_softc *sc)

	/* Enable Summary Bit to check all interrupt causes. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	if (sc->use_inband_status) {
		/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
		MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
	}

	/* Enable All Queue Interrupt */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);

mvneta_rxtxth_intr(void *arg)

	struct mvneta_softc *sc;
	uint32_t ic, queues;

	CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
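	/*
	 * Writing the complement acknowledges exactly the cause bits
	 * just captured; the cause register is assumed to be
	 * write-zero-to-clear, so bits not set in 'ic' are untouched.
	 */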
	/* Ack maintenance interrupt first */
	if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
	    sc->use_inband_status)) {
		mvneta_misc_intr(sc);
		mvneta_sc_unlock(sc);
	}
	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))

	/* RxTxTH interrupt */
	queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
	if (__predict_true(queues)) {
		CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);

		/* At the moment the driver supports only one RX queue. */
		DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
		mvneta_rx(sc, 0, 0);
	}

mvneta_misc_intr(struct mvneta_softc *sc)

	CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);

	ic = MVNETA_READ(sc, MVNETA_PMIC);
	ic &= MVNETA_READ(sc, MVNETA_PMIM);
	MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);

	if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
		mvneta_link_isr(sc);

mvneta_tick(void *arg)

	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct mvneta_rx_ring *rx;
	uint32_t fc_prev, fc_curr;

	/*
	 * This is done before the MIB update to get the right stats.
	 */
	mvneta_tx_drain(sc);

	/* Extract previous flow-control frame received counter. */
	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
	/* Read MIB registers (cleared by read). */
	mvneta_update_mib(sc);
	/* Extract current flow-control frame received counter. */
	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;

	if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
		/* Adjust MAC settings */
		mvneta_adjust_link(sc);
		mvneta_sc_unlock(sc);
	}

	/*
	 * We were unable to refill the rx queue and left the rx func,
	 * leaving the ring without mbufs and with no way to call the
	 * refill function.
	 */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		if (rx->needs_refill == TRUE) {
			mvneta_rx_lockq(sc, q);
			mvneta_rx_queue_refill(sc, q);
			mvneta_rx_unlockq(sc, q);
		}
	}

	/*
	 * - check if the queue is marked as hung.
	 * - ignore the hung status if we received some pause frames,
	 *   as the hardware may have paused packet transmission.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		/*
		 * We should take the queue lock, but as we only read
		 * the queue status we can do it without the lock; we
		 * may only misdetect the queue status for one tick.
		 */
		tx = MVNETA_TX_RING(sc, q);

		if (tx->queue_hung && (fc_curr - fc_prev) == 0)
	}
	callout_schedule(&sc->tick_ch, hz);

	if_printf(sc->ifp, "watchdog timeout\n");

	sc->counter_watchdog++;
	sc->counter_watchdog_mib++;
	/* Trigger reinitialize sequence. */
	mvneta_stop_locked(sc);
	mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);

mvneta_qflush(struct ifnet *ifp)

#ifdef MVNETA_MULTIQUEUE
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
			m_freem(m);
		mvneta_tx_unlockq(sc, q);
	}
#endif

mvneta_tx_task(void *arg, int pending)

	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	mvneta_tx_lockq(sc, tx->qidx);
	error = mvneta_xmit_locked(sc, tx->qidx);
	mvneta_tx_unlockq(sc, tx->qidx);

	if (__predict_false(error != 0 && error != ENETDOWN)) {
		pause("mvneta_tx_task_sleep", 1);
		taskqueue_enqueue(tx->taskq, &tx->task);
	}

mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)

	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);

	/* Don't enqueue a packet if the queue is disabled. */
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {

	/* Reclaim mbufs when above the threshold. */
	if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
		mvneta_tx_queue_complete(sc, q);

	/* Do not call transmit path if queue is already too full. */
	if (__predict_false(tx->used >
	    MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))

	error = mvneta_tx_queue(sc, m, q);
	if (__predict_false(error != 0))

	/* Send a copy of the frame to the BPF listener */
	ETHER_BPF_MTAP(ifp, *m);

	/* Set watchdog on */
	tx->watchdog_time = ticks;
	tx->queue_status = MVNETA_QUEUE_WORKING;

#ifdef MVNETA_MULTIQUEUE
mvneta_transmit(struct ifnet *ifp, struct mbuf *m)

	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	/*
	 * Use the default queue if there is no flow ID,
	 * as the thread can migrate.
	 */
	if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
		q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
	else
		q = 0;

	tx = MVNETA_TX_RING(sc, q);

	/* If the buf_ring is full, start transmitting immediately. */
	if (buf_ring_full(tx->br)) {
		mvneta_tx_lockq(sc, q);
		mvneta_xmit_locked(sc, q);
		mvneta_tx_unlockq(sc, q);
	}

	/*
	 * If the buf_ring is empty we will not reorder packets.
	 * If the lock is available transmit without using buf_ring.
	 */
	if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
		error = mvneta_xmitfast_locked(sc, q, &m);
		mvneta_tx_unlockq(sc, q);
		if (__predict_true(error == 0))

		/* Transmit can fail in fastpath. */
		if (__predict_false(m == NULL))
	}

	/* Enqueue then schedule taskqueue. */
	error = drbr_enqueue(ifp, tx->br, m);
	if (__predict_false(error != 0))

	taskqueue_enqueue(tx->taskq, &tx->task);
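/*
 * The drain loop below uses the drbr peek/advance protocol:
 * drbr_peek() leaves the mbuf on the ring, so a transient transmit
 * failure can return it with drbr_putback() (or retire the slot with
 * drbr_advance() if the chain was consumed); only after a successful
 * transmit is the slot retired with drbr_advance().
 */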
1989 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1992 struct mvneta_tx_ring *tx;
1996 KASSERT_TX_MTX(sc, q);
1998 tx = MVNETA_TX_RING(sc, q);
2001 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
2002 error = mvneta_xmitfast_locked(sc, q, &m);
2003 if (__predict_false(error != 0)) {
2005 drbr_putback(ifp, tx->br, m);
2007 drbr_advance(ifp, tx->br);
2010 drbr_advance(ifp, tx->br);
2015 #else /* !MVNETA_MULTIQUEUE */
2017 mvneta_start(struct ifnet *ifp)
2019 struct mvneta_softc *sc;
2020 struct mvneta_tx_ring *tx;
2024 tx = MVNETA_TX_RING(sc, 0);
2026 mvneta_tx_lockq(sc, 0);
2027 error = mvneta_xmit_locked(sc, 0);
2028 mvneta_tx_unlockq(sc, 0);
2029 /* Handle retransmit in the background taskq. */
2030 if (__predict_false(error != 0 && error != ENETDOWN))
2031 taskqueue_enqueue(tx->taskq, &tx->task);
2035 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
2038 struct mvneta_tx_ring *tx;
2042 KASSERT_TX_MTX(sc, q);
2044 tx = MVNETA_TX_RING(sc, 0);
2047 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2048 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
2052 error = mvneta_xmitfast_locked(sc, q, &m);
2053 if (__predict_false(error != 0)) {
2055 IFQ_DRV_PREPEND(&ifp->if_snd, m);
2065 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2067 struct mvneta_softc *sc;
2068 struct mvneta_rx_ring *rx;
2076 ifr = (struct ifreq *)data;
2080 if (ifp->if_flags & IFF_UP) {
2081 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2082 flags = ifp->if_flags ^ sc->mvneta_if_flags;
2085 sc->mvneta_if_flags = ifp->if_flags;
2087 if ((flags & IFF_PROMISC) != 0)
2088 mvneta_filter_setup(sc);
2090 mvneta_init_locked(sc);
2091 sc->mvneta_if_flags = ifp->if_flags;
2092 if (sc->phy_attached)
2093 mii_mediachg(sc->mii);
2094 mvneta_sc_unlock(sc);
2097 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2098 mvneta_stop_locked(sc);
2100 sc->mvneta_if_flags = ifp->if_flags;
2101 mvneta_sc_unlock(sc);
2104 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU &&
2105 ifr->ifr_reqcap & IFCAP_TXCSUM)
2106 ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2107 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2108 if (mask & IFCAP_HWCSUM) {
2109 ifp->if_capenable &= ~IFCAP_HWCSUM;
2110 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
2111 if (ifp->if_capenable & IFCAP_TXCSUM)
2112 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2115 ifp->if_hwassist = 0;
2117 if (mask & IFCAP_LRO) {
2119 ifp->if_capenable ^= IFCAP_LRO;
2120 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2121 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2122 rx = MVNETA_RX_RING(sc, q);
2123 rx->lro_enabled = !rx->lro_enabled;
2126 mvneta_sc_unlock(sc);
2128 VLAN_CAPABILITIES(ifp);
2131 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2132 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2133 (ifr->ifr_media & IFM_FDX) == 0) {
2134 device_printf(sc->dev,
2135 "%s half-duplex unsupported\n",
2136 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2142 case SIOCGIFMEDIA: /* FALLTHROUGH */
2144 if (!sc->phy_attached)
2145 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2148 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2152 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2153 MVNETA_ETHER_SIZE) {
2156 ifp->if_mtu = ifr->ifr_mtu;
2158 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) {
2159 ifp->if_capenable &= ~IFCAP_TXCSUM;
2160 ifp->if_hwassist = 0;
2162 ifp->if_capenable |= IFCAP_TXCSUM;
2163 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2167 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2168 /* Trigger reinitialize sequence */
2169 mvneta_stop_locked(sc);
2170 mvneta_init_locked(sc);
2172 mvneta_sc_unlock(sc);
2177 error = ether_ioctl(ifp, cmd, data);
2185 mvneta_init_locked(void *arg)
2187 struct mvneta_softc *sc;
2195 if (!device_is_attached(sc->dev) ||
2196 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2199 mvneta_disable_intr(sc);
2200 callout_stop(&sc->tick_ch);
2202 /* Get the latest mac address */
2203 bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
2204 mvneta_set_mac_address(sc, sc->enaddr);
2205 mvneta_filter_setup(sc);
2207 /* Start DMA Engine */
2208 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
2209 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
2210 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
2213 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2214 reg |= MVNETA_PMACC0_PORTEN;
2215 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2217 /* Allow access to each TXQ/RXQ from both CPU's */
2218 for (cpu = 0; cpu < mp_ncpus; ++cpu)
2219 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
2220 MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
2222 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2223 mvneta_rx_lockq(sc, q);
2224 mvneta_rx_queue_refill(sc, q);
2225 mvneta_rx_unlockq(sc, q);
2228 if (!sc->phy_attached)
2231 /* Enable interrupt */
2232 mvneta_enable_intr(sc);
2235 callout_schedule(&sc->tick_ch, hz);
2237 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2241 mvneta_init(void *arg)
2243 struct mvneta_softc *sc;
2247 mvneta_init_locked(sc);
2248 if (sc->phy_attached)
2249 mii_mediachg(sc->mii);
2250 mvneta_sc_unlock(sc);
2255 mvneta_stop_locked(struct mvneta_softc *sc)
2258 struct mvneta_rx_ring *rx;
2259 struct mvneta_tx_ring *tx;
2264 if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2267 mvneta_disable_intr(sc);
2269 callout_stop(&sc->tick_ch);
2271 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2274 if (sc->linkup == TRUE)
2275 mvneta_linkdown(sc);
2277 /* Reset the MAC Port Enable bit */
2278 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2279 reg &= ~MVNETA_PMACC0_PORTEN;
2280 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2282 /* Disable each of queue */
2283 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2284 rx = MVNETA_RX_RING(sc, q);
2286 mvneta_rx_lockq(sc, q);
2287 mvneta_ring_flush_rx_queue(sc, q);
2288 mvneta_rx_unlockq(sc, q);
2292 * Hold Reset state of DMA Engine
2293 * (must write 0x0 to restart it)
2295 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
2296 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
2298 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2299 tx = MVNETA_TX_RING(sc, q);
2301 mvneta_tx_lockq(sc, q);
2302 mvneta_ring_flush_tx_queue(sc, q);
2303 mvneta_tx_unlockq(sc, q);
2308 mvneta_stop(struct mvneta_softc *sc)
2312 mvneta_stop_locked(sc);
2313 mvneta_sc_unlock(sc);
2317 mvneta_mediachange(struct ifnet *ifp)
2319 struct mvneta_softc *sc;
2323 if (!sc->phy_attached && !sc->use_inband_status) {
2324 /* We shouldn't be here */
2325 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2329 if (sc->use_inband_status) {
2330 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2337 mii_mediachg(sc->mii);
2339 mvneta_sc_unlock(sc);
2345 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
2349 psr = MVNETA_READ(sc, MVNETA_PSR);
2352 if (psr & MVNETA_PSR_GMIISPEED)
2353 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2354 else if (psr & MVNETA_PSR_MIISPEED)
2355 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2356 else if (psr & MVNETA_PSR_LINKUP)
2357 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2360 if (psr & MVNETA_PSR_FULLDX)
2361 ifmr->ifm_active |= IFM_FDX;
2364 ifmr->ifm_status = IFM_AVALID;
2365 if (psr & MVNETA_PSR_LINKUP)
2366 ifmr->ifm_status |= IFM_ACTIVE;
2370 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2372 struct mvneta_softc *sc;
2373 struct mii_data *mii;
2377 if (!sc->phy_attached && !sc->use_inband_status) {
2378 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2384 if (sc->use_inband_status) {
2385 mvneta_get_media(sc, ifmr);
2386 mvneta_sc_unlock(sc);
2393 ifmr->ifm_active = mii->mii_media_active;
2394 ifmr->ifm_status = mii->mii_media_status;
2396 mvneta_sc_unlock(sc);
2403 mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
2408 reg = MVNETA_READ(sc, MVNETA_PANC);
2409 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2410 MVNETA_PANC_ANFCEN);
2411 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2412 MVNETA_PANC_INBANDANEN;
2413 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2415 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2416 reg |= MVNETA_PMACC2_INBANDANMODE;
2417 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2419 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2420 reg |= MVNETA_PSOMSCD_ENABLE;
2421 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2423 reg = MVNETA_READ(sc, MVNETA_PANC);
2424 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2425 MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2426 MVNETA_PANC_INBANDANEN);
2427 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2429 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2430 reg &= ~MVNETA_PMACC2_INBANDANMODE;
2431 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2433 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2434 reg &= ~MVNETA_PSOMSCD_ENABLE;
2435 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2440 mvneta_update_media(struct mvneta_softc *sc, int media)
2449 mvneta_linkreset(sc);
2451 running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
2453 mvneta_stop_locked(sc);
2455 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2457 if (sc->use_inband_status)
2458 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
2460 mvneta_update_eee(sc);
2461 mvneta_update_fc(sc);
2463 if (IFM_SUBTYPE(media) != IFM_AUTO) {
2464 reg = MVNETA_READ(sc, MVNETA_PANC);
2465 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2466 MVNETA_PANC_SETMIISPEED |
2467 MVNETA_PANC_SETFULLDX);
2468 if (IFM_SUBTYPE(media) == IFM_1000_T ||
2469 IFM_SUBTYPE(media) == IFM_2500_T) {
2470 if ((media & IFM_FDX) == 0) {
2471 device_printf(sc->dev,
2472 "%s half-duplex unsupported\n",
2473 IFM_SUBTYPE(media) == IFM_1000_T ?
2479 reg |= MVNETA_PANC_SETGMIISPEED;
2480 } else if (IFM_SUBTYPE(media) == IFM_100_TX)
2481 reg |= MVNETA_PANC_SETMIISPEED;
2483 if (media & IFM_FDX)
2484 reg |= MVNETA_PANC_SETFULLDX;
2486 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2490 mvneta_init_locked(sc);
2491 mvneta_sc_unlock(sc);
2496 mvneta_adjust_link(struct mvneta_softc *sc)
2498 boolean_t phy_linkup;
2502 mvneta_update_eee(sc);
2503 mvneta_update_fc(sc);
2505 /* Check for link change */
2506 phy_linkup = (sc->mii->mii_media_status &
2507 (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
2509 if (sc->linkup != phy_linkup)
2510 mvneta_linkupdate(sc, phy_linkup);
2512 /* Don't update media on disabled link */
2516 /* Check for media type change */
2517 if (sc->mvneta_media != sc->mii->mii_media_active) {
2518 sc->mvneta_media = sc->mii->mii_media_active;
2520 reg = MVNETA_READ(sc, MVNETA_PANC);
2521 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2522 MVNETA_PANC_SETMIISPEED |
2523 MVNETA_PANC_SETFULLDX);
2524 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2525 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2526 reg |= MVNETA_PANC_SETGMIISPEED;
2527 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2528 reg |= MVNETA_PANC_SETMIISPEED;
2530 if (sc->mvneta_media & IFM_FDX)
2531 reg |= MVNETA_PANC_SETFULLDX;
2533 MVNETA_WRITE(sc, MVNETA_PANC, reg);
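/*
 * Note: the PANC update above mirrors the forced-media branch of
 * mvneta_update_media(), except that it is driven by the media word
 * resolved by the PHY (sc->mii->mii_media_active) rather than by a
 * user-requested media setting.
 */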
2538 mvneta_link_isr(struct mvneta_softc *sc)
2544 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2545 if (sc->linkup == linkup)
2551 mvneta_linkdown(sc);
2555 "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
2560 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2568 mvneta_linkdown(sc);
2572 "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
2577 mvneta_update_eee(struct mvneta_softc *sc)
2583 /* set EEE parameters */
2584 reg = MVNETA_READ(sc, MVNETA_LPIC1);
2586 reg |= MVNETA_LPIC1_LPIRE;
2588 reg &= ~MVNETA_LPIC1_LPIRE;
2589 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
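/*
 * A minimal sketch of the condition around the LPIRE set/clear pair
 * above, assuming it keys off sc->cf_lpi (the "lpi" sysctl created in
 * sysctl_mvneta_init()):
 *
 *	if (sc->cf_lpi)
 *		reg |= MVNETA_LPIC1_LPIRE;
 *	else
 *		reg &= ~MVNETA_LPIC1_LPIRE;
 */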
2593 mvneta_update_fc(struct mvneta_softc *sc)
2599 reg = MVNETA_READ(sc, MVNETA_PANC);
2601 /* Flow control negotiation */
2602 reg |= MVNETA_PANC_PAUSEADV;
2603 reg |= MVNETA_PANC_ANFCEN;
2605 /* Disable flow control negotiation */
2606 reg &= ~MVNETA_PANC_PAUSEADV;
2607 reg &= ~MVNETA_PANC_ANFCEN;
2610 MVNETA_WRITE(sc, MVNETA_PANC, reg);
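/*
 * As above, a sketch of the branch around the PAUSEADV/ANFCEN pair,
 * assuming it keys off sc->cf_fc (the "flow_control" sysctl):
 *
 *	if (sc->cf_fc)
 *		reg |= MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN;
 *	else
 *		reg &= ~(MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN);
 */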
2614 mvneta_linkup(struct mvneta_softc *sc)
2620 if (!sc->use_inband_status) {
2621 reg = MVNETA_READ(sc, MVNETA_PANC);
2622 reg |= MVNETA_PANC_FORCELINKPASS;
2623 reg &= ~MVNETA_PANC_FORCELINKFAIL;
2624 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2627 mvneta_qflush(sc->ifp);
2630 if_link_state_change(sc->ifp, LINK_STATE_UP);
2634 mvneta_linkdown(struct mvneta_softc *sc)
2640 if (!sc->use_inband_status) {
2641 reg = MVNETA_READ(sc, MVNETA_PANC);
2642 reg &= ~MVNETA_PANC_FORCELINKPASS;
2643 reg |= MVNETA_PANC_FORCELINKFAIL;
2644 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2647 mvneta_portdown(sc);
2648 mvneta_qflush(sc->ifp);
2650 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2654 mvneta_linkreset(struct mvneta_softc *sc)
2656 struct mii_softc *mii;
2658 if (sc->phy_attached) {
2659 /* Force reset PHY */
2660 mii = LIST_FIRST(&sc->mii->mii_phys);
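/*
 * A minimal sketch of how the reset itself would be issued, assuming
 * the standard miibus helper is applied to the first attached PHY:
 *
 *	if (mii != NULL)
 *		mii_phy_reset(mii);
 */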
2670 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2673 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2674 struct mbuf *mtmp, *mbuf;
2675 struct mvneta_tx_ring *tx;
2676 struct mvneta_buf *txbuf;
2677 struct mvneta_tx_desc *t;
2679 int start, used, error, i, txnsegs;
2682 tx = MVNETA_TX_RING(sc, q);
2683 DASSERT(tx->used >= 0);
2684 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2688 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2689 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2695 mbuf->m_flags &= ~M_VLANTAG;
2699 if (__predict_false(mbuf->m_next != NULL &&
2700 (mbuf->m_pkthdr.csum_flags &
2701 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2702 if (M_WRITABLE(mbuf) == 0) {
2703 mtmp = m_dup(mbuf, M_NOWAIT);
2710 *mbufp = mbuf = mtmp;
2714 /* load mbuf using dmamap of 1st descriptor */
2715 txbuf = &tx->txbuf[tx->cpu];
2716 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2717 txbuf->dmap, mbuf, txsegs, &txnsegs,
2719 if (__predict_false(error != 0)) {
2721 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
2723 /* ENOMEM is the only recoverable error (except EFBIG). */
2724 if (error != ENOMEM) {
2733 if (__predict_false(txnsegs <= 0
2734 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2735 /* not enough free descriptors, or the mbuf is broken */
2737 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2738 ifp->if_xname, q, txnsegs);
2740 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2743 DASSERT(txbuf->m == NULL);
2745 /* remember mbuf using 1st descriptor */
2747 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2748 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2750 /* load to tx descriptors */
2753 for (i = 0; i < txnsegs; i++) {
2754 t = &tx->desc[tx->cpu];
2758 if (__predict_true(i == 0)) {
2759 /* 1st descriptor */
2760 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2761 t->command |= MVNETA_TX_CMD_F;
2762 mvneta_tx_set_csumflag(ifp, t, mbuf);
2764 t->bufptr_pa = txsegs[i].ds_addr;
2765 t->bytecnt = txsegs[i].ds_len;
2766 tx->cpu = tx_counter_adv(tx->cpu, 1);
2771 /* t is last descriptor here */
2773 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2775 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2776 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2778 while (__predict_false(used > 255)) {
2779 ptxsu = MVNETA_PTXSU_NOWD(255);
2780 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2783 if (__predict_true(used > 0)) {
2784 ptxsu = MVNETA_PTXSU_NOWD(used);
2785 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
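/*
 * The pending-descriptor count is handed to the hardware in chunks of
 * at most 255, apparently because the PTXSU NOWD field is only eight
 * bits wide; used == 300, for example, is written as 255 followed by 45.
 */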
2791 mvneta_tx_set_csumflag(struct ifnet *ifp,
2792 struct mvneta_tx_desc *t, struct mbuf *m)
2794 struct ether_header *eh;
2796 uint32_t iphl, ipoff;
2800 csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
2801 eh = mtod(m, struct ether_header *);
2802 switch (ntohs(eh->ether_type)) {
2804 ipoff = ETHER_HDR_LEN;
2806 case ETHERTYPE_IPV6:
2808 case ETHERTYPE_VLAN:
2809 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2813 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2814 ip = (struct ip *)(m->m_data + ipoff);
2815 iphl = ip->ip_hl<<2;
2816 t->command |= MVNETA_TX_CMD_L3_IP4;
2818 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2824 if (csum_flags & CSUM_IP) {
2825 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2829 if (csum_flags & CSUM_IP_TCP) {
2830 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2831 t->command |= MVNETA_TX_CMD_L4_TCP;
2832 } else if (csum_flags & CSUM_IP_UDP) {
2833 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2834 t->command |= MVNETA_TX_CMD_L4_UDP;
2836 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2839 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2840 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
2844 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2846 struct mvneta_tx_ring *tx;
2847 struct mvneta_buf *txbuf;
2848 struct mvneta_tx_desc *t;
2849 uint32_t ptxs, ptxsu, ndesc;
2852 KASSERT_TX_MTX(sc, q);
2854 tx = MVNETA_TX_RING(sc, q);
2855 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2858 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2859 ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2861 if (__predict_false(ndesc == 0)) {
2863 tx->queue_status = MVNETA_QUEUE_IDLE;
2864 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2865 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2866 tx->queue_hung = TRUE;
2871 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2872 sc->ifp->if_xname, q, ndesc);
2875 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2876 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2878 for (i = 0; i < ndesc; i++) {
2879 t = &tx->desc[tx->dma];
2881 if (t->flags & MVNETA_TX_F_ES)
2882 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2883 sc->ifp->if_xname, q, tx->dma);
2885 txbuf = &tx->txbuf[tx->dma];
2886 if (__predict_true(txbuf->m != NULL)) {
2887 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2888 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2893 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
2894 tx->dma = tx_counter_adv(tx->dma, 1);
2897 DASSERT(tx->used >= 0);
2898 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2899 while (__predict_false(ndesc > 255)) {
2900 ptxsu = MVNETA_PTXSU_NORB(255);
2901 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2904 if (__predict_true(ndesc > 0)) {
2905 ptxsu = MVNETA_PTXSU_NORB(ndesc);
2906 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2909 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2910 sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
2913 tx->watchdog_time = ticks;
2916 tx->queue_status = MVNETA_QUEUE_IDLE;
2920 * Do a final TX complete when TX is idle.
2923 mvneta_tx_drain(struct mvneta_softc *sc)
2925 struct mvneta_tx_ring *tx;
2929 * Handle trailing mbuf on TX queue.
2930 * The check is done locklessly to avoid contention on the TX path.
2932 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2933 tx = MVNETA_TX_RING(sc, q);
2934 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2936 mvneta_tx_lockq(sc, q);
2937 mvneta_tx_queue_complete(sc, q);
2938 mvneta_tx_unlockq(sc, q);
2947 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2949 uint32_t prxs, npkt;
2953 mvneta_rx_lockq(sc, q);
2954 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2955 npkt = MVNETA_PRXS_GET_ODC(prxs);
2956 if (__predict_false(npkt == 0))
2959 if (count > 0 && npkt > count) {
2963 mvneta_rx_queue(sc, q, npkt);
2965 mvneta_rx_unlockq(sc, q);
2970 * Helper routine for updating PRXSU register of a given queue.
2971 * Handles processed-descriptor counts larger than the maximum the register accepts.
2973 STATIC __inline void
2974 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2978 while (__predict_false(processed > 255)) {
2979 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2980 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2983 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2984 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
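/*
 * Example: processed == 600 reaches the hardware as three PRXSU writes
 * of 255, 255 and 90, since the per-write count saturates at 255.
 */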
2987 static __inline void
2988 mvneta_prefetch(void *p)
2991 __builtin_prefetch(p);
2995 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
2998 struct mvneta_rx_ring *rx;
2999 struct mvneta_rx_desc *r;
3000 struct mvneta_buf *rxbuf;
3002 struct lro_ctrl *lro;
3003 struct lro_entry *queued;
3005 int i, pktlen, processed, ndma;
3007 KASSERT_RX_MTX(sc, q);
3010 rx = MVNETA_RX_RING(sc, q);
3013 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3016 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3017 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3019 for (i = 0; i < npkt; i++) {
3020 /* Prefetch next desc, rxbuf. */
3021 ndma = rx_counter_adv(rx->dma, 1);
3022 mvneta_prefetch(&rx->desc[ndma]);
3023 mvneta_prefetch(&rx->rxbuf[ndma]);
3025 /* get descriptor and packet */
3026 r = &rx->desc[rx->dma];
3027 rxbuf = &rx->rxbuf[rx->dma];
3031 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3032 BUS_DMASYNC_POSTREAD);
3033 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3034 /* Prefetch mbuf header. */
3038 /* Drop descriptors with an error status, or frames not contained in a single buffer. */
3039 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3040 (MVNETA_RX_F|MVNETA_RX_L));
3041 if (__predict_false((r->status & MVNETA_RX_ES) ||
3042 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3043 (MVNETA_RX_F|MVNETA_RX_L)))
3047 * [ OFF | MH | PKT | CRC ]
3048 * bytecnt covers MH, PKT and CRC
3050 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3051 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3052 MVNETA_HWHEADER_SIZE;
3054 /* Prefetch mbuf data. */
3055 mvneta_prefetch(pktbuf);
3057 /* Write values to the mbuf header (avoid reading it first). */
3059 m->m_len = m->m_pkthdr.len = pktlen;
3060 m->m_pkthdr.rcvif = ifp;
3061 mvneta_rx_set_csumflag(ifp, r, m);
3063 /* Increase rx_dma before releasing the lock. */
3066 if (__predict_false(rx->lro_enabled &&
3067 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3068 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3069 (m->m_pkthdr.csum_flags &
3070 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3071 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3072 if (rx->lro.lro_cnt != 0) {
3073 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3078 mvneta_rx_unlockq(sc, q);
3079 (*ifp->if_input)(ifp, m);
3080 mvneta_rx_lockq(sc, q);
3082 * Check whether this queue has been disabled in the
3083 * meantime. If yes, then clear LRO and exit.
3085 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3088 /* Refresh receive ring to avoid stall and minimize jitter. */
3089 if (processed >= MVNETA_RX_REFILL_COUNT) {
3090 mvneta_prxsu_update(sc, q, processed);
3091 mvneta_rx_queue_refill(sc, q);
3098 /* Refresh receive ring to avoid stall and minimize jitter. */
3099 if (processed >= MVNETA_RX_REFILL_COUNT) {
3100 mvneta_prxsu_update(sc, q, processed);
3101 mvneta_rx_queue_refill(sc, q);
3106 CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
3108 /* DMA status update */
3109 mvneta_prxsu_update(sc, q, processed);
3110 /* Refill the remaining buffers, if any. */
3111 mvneta_rx_queue_refill(sc, q);
3115 * Flush any outstanding LRO work
3118 while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
3119 LIST_REMOVE(queued, next);
3120 tcp_lro_flush(lro, queued);
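/*
 * The loop above drains each active LRO entry by hand; on FreeBSD
 * versions that provide it, tcp_lro_flush_all(lro) expresses the same
 * drain as a single call.
 */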
3125 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3128 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3129 /* This will remove all data at once */
3134 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3136 struct mvneta_rx_ring *rx;
3137 struct mvneta_rx_desc *r;
3138 struct mvneta_buf *rxbuf;
3139 bus_dma_segment_t segs;
3141 uint32_t prxs, prxsu, ndesc;
3142 int npkt, refill, nsegs, error;
3144 KASSERT_RX_MTX(sc, q);
3146 rx = MVNETA_RX_RING(sc, q);
3147 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3148 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3149 refill = MVNETA_RX_RING_CNT - ndesc;
3151 CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
3154 if (__predict_false(refill <= 0))
3157 for (npkt = 0; npkt < refill; npkt++) {
3158 rxbuf = &rx->rxbuf[rx->cpu];
3159 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3160 if (__predict_false(m == NULL)) {
3164 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3166 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3167 m, &segs, &nsegs, BUS_DMA_NOWAIT);
3168 if (__predict_false(error != 0 || nsegs != 1)) {
3169 KASSERT(0, ("Failed to load Rx mbuf DMA map"));
3174 /* Add the packet to the ring */
3176 r = &rx->desc[rx->cpu];
3177 r->bufptr_pa = segs.ds_addr;
3178 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3180 rx->cpu = rx_counter_adv(rx->cpu, 1);
3183 if (refill == MVNETA_RX_RING_CNT)
3184 rx->needs_refill = TRUE;
3188 rx->needs_refill = FALSE;
3189 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3191 while (__predict_false(npkt > 255)) {
3192 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3193 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3196 if (__predict_true(npkt > 0)) {
3197 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3198 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
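/*
 * m_getcl() above allocates a standard mbuf cluster, so each mapped Rx
 * buffer is m->m_ext.ext_size == MCLBYTES (2 KB) long; received frames
 * are expected to fit in one such buffer (see the MVNETA_RX_F/
 * MVNETA_RX_L check in mvneta_rx_queue()).
 */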
3202 STATIC __inline void
3203 mvneta_rx_set_csumflag(struct ifnet *ifp,
3204 struct mvneta_rx_desc *r, struct mbuf *m)
3206 uint32_t csum_flags;
3209 if (__predict_false((r->status &
3210 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
3211 return; /* not an IP packet */
3214 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3215 MVNETA_RX_IP_HEADER_OK))
3216 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3218 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3219 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3221 switch (r->status & MVNETA_RX_L4_MASK) {
3222 case MVNETA_RX_L4_TCP:
3223 case MVNETA_RX_L4_UDP:
3224 csum_flags |= CSUM_L4_CALC;
3225 if (__predict_true((r->status &
3226 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3227 csum_flags |= CSUM_L4_VALID;
3228 m->m_pkthdr.csum_data = htons(0xffff);
3231 case MVNETA_RX_L4_OTH:
3236 m->m_pkthdr.csum_flags = csum_flags;
3240 * MAC address filter
3243 mvneta_filter_setup(struct mvneta_softc *sc)
3246 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3252 memset(dfut, 0, sizeof(dfut));
3253 memset(dfsmt, 0, sizeof(dfsmt));
3254 memset(dfomt, 0, sizeof(dfomt));
3257 ifp->if_flags |= IFF_ALLMULTI;
3258 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
3259 for (i = 0; i < MVNETA_NDFSMT; i++) {
3260 dfsmt[i] = dfomt[i] =
3261 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3262 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3263 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3264 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3268 pxc = MVNETA_READ(sc, MVNETA_PXC);
3269 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3270 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3271 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3272 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3273 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3274 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3275 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3276 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3277 if (ifp->if_flags & IFF_BROADCAST) {
3278 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3280 if (ifp->if_flags & IFF_PROMISC) {
3281 pxc |= MVNETA_PXC_UPM;
3283 MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3285 /* Set Destination Address Filter Unicast Table */
3286 if (ifp->if_flags & IFF_PROMISC) {
3287 /* pass all unicast addresses */
3288 for (i = 0; i < MVNETA_NDFUT; i++) {
3290 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3291 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3292 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3293 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3296 i = sc->enaddr[5] & 0xf; /* last nibble */
3297 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3299 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3301 /* Set Destination Address Filter Multicast Tables */
3302 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3303 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
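/*
 * Example: for a station address whose last byte is 0x35, i == 5, so
 * the PASS entry lands in byte lane 1 (i & 3) of dfut[1] (i >> 2);
 * unicast frames hashing to any other table slot are filtered out
 * unless IFF_PROMISC is set.
 */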
3310 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3312 struct mvneta_sysctl_mib *arg;
3313 struct mvneta_softc *sc;
3316 arg = (struct mvneta_sysctl_mib *)arg1;
3323 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
3328 mvneta_sc_unlock(sc);
3329 return sysctl_handle_64(oidp, &val, 0, req);
3334 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3336 struct mvneta_softc *sc;
3340 sc = (struct mvneta_softc *)arg1;
3344 err = sysctl_handle_int(oidp, &val, 0, req);
3348 if (val < 0 || val > 1)
3353 mvneta_clear_mib(sc);
3354 mvneta_sc_unlock(sc);
3361 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3363 struct mvneta_sysctl_queue *arg;
3364 struct mvneta_rx_ring *rx;
3365 struct mvneta_softc *sc;
3366 uint32_t reg, time_mvtclk;
3370 arg = (struct mvneta_sysctl_queue *)arg1;
3373 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
3375 if (arg->rxtx != MVNETA_SYSCTL_RX)
3382 /* read the current interrupt threshold time */
3384 mvneta_rx_lockq(sc, arg->queue);
3385 rx = MVNETA_RX_RING(sc, arg->queue);
3386 time_mvtclk = rx->queue_th_time;
3387 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvneta_get_clk();
3388 mvneta_rx_unlockq(sc, arg->queue);
3389 mvneta_sc_unlock(sc);
3391 err = sysctl_handle_int(oidp, &time_us, 0, req);
3396 mvneta_rx_lockq(sc, arg->queue);
3398 /* update the interrupt threshold time (range 0 - 1 sec) */
3399 if (time_us < 0 || time_us > (1000 * 1000)) {
3400 mvneta_rx_unlockq(sc, arg->queue);
3401 mvneta_sc_unlock(sc);
3405 (uint64_t)mvneta_get_clk() * (uint64_t)time_us / (1000ULL * 1000ULL);
3406 rx->queue_th_time = time_mvtclk;
3407 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3408 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3409 mvneta_rx_unlockq(sc, arg->queue);
3410 mvneta_sc_unlock(sc);
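/*
 * Example conversion: with the 250 MHz Armada 3700 TCLK, writing 500 to
 * threshold_timer_us stores 250000000 * 500 / 1000000 == 125000 clock
 * ticks into MVNETA_PRXITTH.
 */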
3416 sysctl_mvneta_init(struct mvneta_softc *sc)
3418 struct sysctl_ctx_list *ctx;
3419 struct sysctl_oid_list *children;
3420 struct sysctl_oid_list *rxchildren;
3421 struct sysctl_oid_list *qchildren, *mchildren;
3422 struct sysctl_oid *tree;
3424 struct mvneta_sysctl_queue *rxarg;
3425 #define MVNETA_SYSCTL_NAME(num) "queue" # num
3426 static const char *sysctl_queue_names[] = {
3427 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3428 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3429 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3430 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3432 #undef MVNETA_SYSCTL_NAME
3434 #ifndef NO_SYSCTL_DESCR
3435 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3436 static const char *sysctl_queue_descrs[] = {
3437 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3438 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3439 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3440 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3442 #undef MVNETA_SYSCTL_DESCR
3446 ctx = device_get_sysctl_ctx(sc->dev);
3447 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3449 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3450 CTLFLAG_RD, 0, "NETA RX");
3451 rxchildren = SYSCTL_CHILDREN(tree);
3452 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3453 CTLFLAG_RD, 0, "NETA MIB");
3454 mchildren = SYSCTL_CHILDREN(tree);
3457 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3458 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3459 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3460 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3465 /* dev.mvneta.[unit].mib.<mibs> */
3466 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3467 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3471 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
3472 mvneta_mib_list[i].sysctl_name,
3473 CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
3474 sysctl_read_mib, "QU", mvneta_mib_list[i].desc);
3476 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3477 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3478 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3479 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3480 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3481 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3483 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3484 CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
3485 sysctl_clear_mib, "I", "Reset MIB counters");
3487 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3488 rxarg = &sc->sysctl_rx_queue[q];
3492 rxarg->rxtx = MVNETA_SYSCTL_RX;
3494 /* dev.mvneta.[unit].rx.[queue] */
3495 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3496 sysctl_queue_names[q], CTLFLAG_RD, 0,
3497 sysctl_queue_descrs[q]);
3498 qchildren = SYSCTL_CHILDREN(tree);
3500 /* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
3501 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3502 CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
3503 sysctl_set_queue_rxthtime, "IU",
3504 "interrupt coalescing threshold timer [us]");
3512 mvneta_clear_mib(struct mvneta_softc *sc)
3518 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3519 if (mvneta_mib_list[i].reg64)
3520 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3522 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3523 sc->sysctl_mib[i].counter = 0;
3525 MVNETA_READ(sc, MVNETA_PDFC);
3526 sc->counter_pdfc = 0;
3527 MVNETA_READ(sc, MVNETA_POFC);
3528 sc->counter_pofc = 0;
3529 sc->counter_watchdog = 0;
3533 mvneta_update_mib(struct mvneta_softc *sc)
3535 struct mvneta_tx_ring *tx;
3540 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3542 if (mvneta_mib_list[i].reg64)
3543 val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3545 val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3550 sc->sysctl_mib[i].counter += val;
3551 switch (mvneta_mib_list[i].regnum) {
3552 case MVNETA_MIB_RX_GOOD_OCT:
3553 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3555 case MVNETA_MIB_RX_BAD_FRAME:
3556 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3558 case MVNETA_MIB_RX_GOOD_FRAME:
3559 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3561 case MVNETA_MIB_RX_MCAST_FRAME:
3562 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3564 case MVNETA_MIB_TX_GOOD_OCT:
3565 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3567 case MVNETA_MIB_TX_GOOD_FRAME:
3568 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3570 case MVNETA_MIB_TX_MCAST_FRAME:
3571 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3573 case MVNETA_MIB_MAC_COL:
3574 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3576 case MVNETA_MIB_TX_MAC_TRNS_ERR:
3577 case MVNETA_MIB_TX_EXCES_COL:
3578 case MVNETA_MIB_MAC_LATE_COL:
3579 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3584 reg = MVNETA_READ(sc, MVNETA_PDFC);
3585 sc->counter_pdfc += reg;
3586 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3587 reg = MVNETA_READ(sc, MVNETA_POFC);
3588 sc->counter_pofc += reg;
3589 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3592 if (sc->counter_watchdog_mib > 0) {
3593 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3594 sc->counter_watchdog_mib = 0;
3598 * We do not take the queue locks here, so as not to disrupt the TX path.
3599 * At worst we miss one drv error, which the next MIB update will pick
3600 * up. We may also clear the counter while the TX path is incrementing
3601 * it, but since we only clear it when it was non-zero, at most one
3602 * error can be lost.
3604 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3605 tx = MVNETA_TX_RING(sc, i);
3607 if (tx->drv_error > 0) {
3608 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);