 * Copyright (c) 2017 Stormshield.
 * Copyright (c) 2017 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp_lro.h>

#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mdio/mdio.h>

#include <arm/mv/mvvar.h>

#if !defined(__aarch64__)
#include <arm/mv/mvreg.h>
#include <arm/mv/mvwin.h>

#include "if_mvnetareg.h"
#include "if_mvnetavar.h"

#include "miibus_if.h"

#define	STATIC /* nothing */

#define	DASSERT(x) KASSERT((x), (#x))

#define	A3700_TCLK_250MHZ	250000000
/* Device Register Initialization */
STATIC int mvneta_initreg(struct ifnet *);

/* Descriptor ring control for each queue */
STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
STATIC int mvneta_dma_create(struct mvneta_softc *);

/* Rx/Tx Queue Control */
STATIC int mvneta_rx_queue_init(struct ifnet *, int);
STATIC int mvneta_tx_queue_init(struct ifnet *, int);
STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);

/* Interrupt Handlers */
STATIC void mvneta_disable_intr(struct mvneta_softc *);
STATIC void mvneta_enable_intr(struct mvneta_softc *);
STATIC void mvneta_rxtxth_intr(void *);
STATIC int mvneta_misc_intr(struct mvneta_softc *);
STATIC void mvneta_tick(void *);

/* struct ifnet and mii callbacks */
STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
#ifdef MVNETA_MULTIQUEUE
STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
#else /* !MVNETA_MULTIQUEUE */
STATIC void mvneta_start(struct ifnet *);
STATIC void mvneta_qflush(struct ifnet *);
STATIC void mvneta_tx_task(void *, int);
STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
STATIC void mvneta_init(void *);
STATIC void mvneta_init_locked(void *);
STATIC void mvneta_stop(struct mvneta_softc *);
STATIC void mvneta_stop_locked(struct mvneta_softc *);
STATIC int mvneta_mediachange(struct ifnet *);
STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
STATIC void mvneta_portup(struct mvneta_softc *);
STATIC void mvneta_portdown(struct mvneta_softc *);

/* Link State Notify */
STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
STATIC int mvneta_update_media(struct mvneta_softc *, int);
STATIC void mvneta_adjust_link(struct mvneta_softc *);
STATIC void mvneta_update_eee(struct mvneta_softc *);
STATIC void mvneta_update_fc(struct mvneta_softc *);
STATIC void mvneta_link_isr(struct mvneta_softc *);
STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
STATIC void mvneta_linkup(struct mvneta_softc *);
STATIC void mvneta_linkdown(struct mvneta_softc *);
STATIC void mvneta_linkreset(struct mvneta_softc *);

STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
STATIC void mvneta_tx_set_csumflag(struct ifnet *,
    struct mvneta_tx_desc *, struct mbuf *);
STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
STATIC void mvneta_tx_drain(struct mvneta_softc *);

STATIC int mvneta_rx(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
STATIC void mvneta_rx_set_csumflag(struct ifnet *,
    struct mvneta_rx_desc *, struct mbuf *);
STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);

/* MAC address filter */
STATIC void mvneta_filter_setup(struct mvneta_softc *);

STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
STATIC void sysctl_mvneta_init(struct mvneta_softc *);

STATIC void mvneta_clear_mib(struct mvneta_softc *);
STATIC void mvneta_update_mib(struct mvneta_softc *);

STATIC boolean_t mvneta_find_ethernet_prop_switch(phandle_t, phandle_t);
STATIC boolean_t mvneta_has_switch(device_t);

#define	mvneta_sc_lock(sc)	mtx_lock(&sc->mtx)
#define	mvneta_sc_unlock(sc)	mtx_unlock(&sc->mtx)

STATIC struct mtx mii_mutex;
STATIC int mii_init = 0;

STATIC int mvneta_detach(device_t);
STATIC int mvneta_miibus_readreg(device_t, int, int);
STATIC int mvneta_miibus_writereg(device_t, int, int, int);

STATIC uint32_t mvneta_get_clk(void);
static device_method_t mvneta_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	mvneta_detach),
	DEVMETHOD(miibus_readreg,	mvneta_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mvneta_miibus_writereg),
	DEVMETHOD(mdio_readreg,		mvneta_miibus_readreg),
	DEVMETHOD(mdio_writereg,	mvneta_miibus_writereg),

DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));

DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
MODULE_DEPEND(mvneta, ether, 1, 1, 1);
MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
/*
 * List of MIB registers and names.
 */
	MVNETA_MIB_RX_GOOD_OCT_IDX,
	MVNETA_MIB_RX_BAD_OCT_IDX,
	MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
	MVNETA_MIB_RX_GOOD_FRAME_IDX,
	MVNETA_MIB_RX_BAD_FRAME_IDX,
	MVNETA_MIB_RX_BCAST_FRAME_IDX,
	MVNETA_MIB_RX_MCAST_FRAME_IDX,
	MVNETA_MIB_RX_FRAME64_OCT_IDX,
	MVNETA_MIB_RX_FRAME127_OCT_IDX,
	MVNETA_MIB_RX_FRAME255_OCT_IDX,
	MVNETA_MIB_RX_FRAME511_OCT_IDX,
	MVNETA_MIB_RX_FRAME1023_OCT_IDX,
	MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
	MVNETA_MIB_TX_GOOD_OCT_IDX,
	MVNETA_MIB_TX_GOOD_FRAME_IDX,
	MVNETA_MIB_TX_EXCES_COL_IDX,
	MVNETA_MIB_TX_MCAST_FRAME_IDX,
	MVNETA_MIB_TX_BCAST_FRAME_IDX,
	MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
	MVNETA_MIB_FC_SENT_IDX,
	MVNETA_MIB_FC_GOOD_IDX,
	MVNETA_MIB_FC_BAD_IDX,
	MVNETA_MIB_PKT_UNDERSIZE_IDX,
	MVNETA_MIB_PKT_FRAGMENT_IDX,
	MVNETA_MIB_PKT_OVERSIZE_IDX,
	MVNETA_MIB_PKT_JABBER_IDX,
	MVNETA_MIB_MAC_RX_ERR_IDX,
	MVNETA_MIB_MAC_CRC_ERR_IDX,
	MVNETA_MIB_MAC_COL_IDX,
	MVNETA_MIB_MAC_LATE_COL_IDX,
STATIC struct mvneta_mib_def {
	const char *sysctl_name;
} mvneta_mib_list[] = {
	[MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
	    "rx_good_oct", "Good Octets Rx"},
	[MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
	    "rx_bad_oct", "Bad Octets Rx"},
	[MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
	    "tx_mac_err", "MAC Transmit Error"},
	[MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
	    "rx_good_frame", "Good Frames Rx"},
	[MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
	    "rx_bad_frame", "Bad Frames Rx"},
	[MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
	    "rx_bcast_frame", "Broadcast Frames Rx"},
	[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
	    "rx_mcast_frame", "Multicast Frames Rx"},
	[MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
	    "rx_frame_1_64", "Frame Size 1 - 64"},
	[MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
	    "rx_frame_65_127", "Frame Size 65 - 127"},
	[MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
	    "rx_frame_128_255", "Frame Size 128 - 255"},
	[MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
	    "rx_frame_256_511", "Frame Size 256 - 511"},
	[MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
	    "rx_frame_512_1023", "Frame Size 512 - 1023"},
	[MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
	    "rx_fame_1024_max", "Frame Size 1024 - Max"},
	[MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
	    "tx_good_oct", "Good Octets Tx"},
	[MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
	    "tx_good_frame", "Good Frames Tx"},
	[MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
	    "tx_exces_collision", "Excessive Collision"},
	[MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
	    "tx_mcast_frame", "Multicast Frames Tx"},
	[MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
	    "tx_bcast_frame", "Broadcast Frames Tx"},
	[MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
	    "tx_mac_ctl_err", "Unknown MAC Control"},
	[MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
	    "fc_tx", "Flow Control Tx"},
	[MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
	    "fc_rx_good", "Good Flow Control Rx"},
	[MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
	    "fc_rx_bad", "Bad Flow Control Rx"},
	[MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
	    "pkt_undersize", "Undersized Packets Rx"},
	[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
	    "pkt_fragment", "Fragmented Packets Rx"},
	[MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
	    "pkt_oversize", "Oversized Packets Rx"},
	[MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
	    "pkt_jabber", "Jabber Packets Rx"},
	[MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
	    "mac_rx_err", "MAC Rx Errors"},
	[MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
	    "mac_crc_err", "MAC CRC Errors"},
	[MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
	    "mac_collision", "MAC Collision"},
	[MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
	    "mac_late_collision", "MAC Late Collision"},
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE },

	driver_intr_t *handler;
	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },

#if defined(__aarch64__)
	return (A3700_TCLK_250MHZ);
mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) |
	    (addr[2] << 8) | (addr[3] << 0);

	MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
	MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
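	/*
	 * Worked example (editor's sketch, not part of the original source):
	 * for the address 00:11:22:33:44:55 the packing above yields
	 * MACAH = 0x00112233 and MACAL = 0x00004455, i.e. the first four
	 * octets land in the high register and the last two in the low one.
	 */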
mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
	uint32_t mac_l, mac_h;

	if (mvneta_fdt_mac_address(sc, addr) == 0)

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
	if (mac_l == 0 && mac_h == 0) {
		/*
		 * Generate a pseudo-random MAC.
		 * Set the lower part to a random number | unit number.
		 */
		mac_l = arc4random() & ~0xff;
		mac_l |= device_get_unit(sc->dev) & 0xff;
		mac_h = arc4random();
		mac_h &= ~(3 << 24);	/* Clear multicast and LAA bits */
		device_printf(sc->dev,
		    "Could not acquire MAC address. "
		    "Using randomized one.\n");

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
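	/* Editor's note: the unpacking above is the exact inverse of
	 * the packing done in mvneta_set_mac_address(). */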
mvneta_find_ethernet_prop_switch(phandle_t ethernet, phandle_t node)
	phandle_t child, switch_eth_handle, switch_eth;

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if (OF_getencprop(child, "ethernet", (void*)&switch_eth_handle,
		    sizeof(switch_eth_handle)) > 0) {
			if (switch_eth_handle > 0) {
				switch_eth = OF_node_from_xref(
				if (switch_eth == ethernet)

		ret = mvneta_find_ethernet_prop_switch(ethernet, child);

mvneta_has_switch(device_t self)
	node = ofw_bus_get_node(self);

	return (mvneta_find_ethernet_prop_switch(node, OF_finddevice("/")));
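/*
 * Editor's note on the lookup above: the device tree is walked recursively
 * from "/" looking for any node whose "ethernet" property is an xref back
 * to this controller's node; switch bindings typically point at their host
 * Ethernet port this way.
 */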
mvneta_dma_create(struct mvneta_softc *sc)
	size_t maxsize, maxsegsz;

	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    maxsize,			/* maxsize */
	    maxsegsz,			/* maxsegsz */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->tx_dtag);		/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx descriptors.\n");

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME,		/* maxsize */
	    MVNETA_TX_SEGLIMIT,		/* nsegments */
	    MVNETA_MAX_FRAME,		/* maxsegsz */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx mbufs.\n");

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_alloc_tx_queue(sc, q);
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for TxQ: %d\n", q);
	/* Create a DMA tag for Rx descriptors */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    32, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->rx_dtag);		/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx descriptors.\n");

	/* Create a DMA tag for Rx buffers */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    32, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME, 1,	/* maxsize, nsegments */
	    MVNETA_MAX_FRAME,		/* maxsegsz */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->rxbuf_dtag);		/* dmat */
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx buffers.\n");

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for RxQ: %d\n", q);

	mvneta_detach(sc->dev);
mvneta_attach(device_t self)
	struct mvneta_softc *sc;
#if !defined(__aarch64__)
	sc = device_get_softc(self);
	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);

	error = bus_alloc_resources(self, res_spec, sc->res);
		device_printf(self, "could not allocate resources\n");

	sc->version = MVNETA_READ(sc, MVNETA_PV);
	device_printf(self, "version is %x\n", sc->version);
	callout_init(&sc->tick_ch, 0);

	/*
	 * Make sure the DMA engines are in reset state.
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);

#if !defined(__aarch64__)
	/*
	 * Disable port snoop for buffers and descriptors
	 * to avoid L2 caching of both without DRAM copy.
	 * Obtain coherency settings from the first MBUS window.
	 */
	if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
		reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
		reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
		reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
		MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);

	if (mvneta_get_mac_address(sc, sc->enaddr)) {
		device_printf(self, "no mac address.\n");
	mvneta_set_mac_address(sc, sc->enaddr);

	mvneta_disable_intr(sc);

	/* Allocate the network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
		device_printf(self, "if_alloc() failed\n");
	if_initname(ifp, device_get_name(self), device_get_unit(self));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef MVNETA_MULTIQUEUE
	ifp->if_transmit = mvneta_transmit;
	ifp->if_qflush = mvneta_qflush;
#else /* !MVNETA_MULTIQUEUE */
	ifp->if_start = mvneta_start;
	ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_init = mvneta_init;
	ifp->if_ioctl = mvneta_ioctl;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM;

	/*
	 * VLAN hardware tagging is not supported, but it is necessary to
	 * perform VLAN hardware checksums, so it is done in the driver.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;

	/*
	 * Currently the IPv6 HW checksum is broken, so make sure it is disabled.
	 */
	ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disabled option(s):
	 * - Support for Large Receive Offload
	 */
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;

	sc->rx_frame_size = MCLBYTES;	/* ether_ifattach() always sets the normal MTU */

	/*
	 * Device DMA buffer allocation.
	 * Handles resource deallocation in case of failure.
	 */
	error = mvneta_dma_create(sc);

	/* Initialize the queues */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_init_tx_queue(sc, q);
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		error = mvneta_ring_init_rx_queue(sc, q);

	ether_ifattach(ifp, sc->enaddr);

	/*
	 * Enable the DMA engines and initialize the device registers.
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);

	mvneta_filter_setup(sc);
	mvneta_sc_unlock(sc);
	/*
	 * Now that the MAC is working, set up MII.
	 */
	/*
	 * The MII bus is shared by all MACs and all PHYs in the SoC.
	 * Serializing the bus access should be safe.
	 */
	mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);

	if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
		error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
		    mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
			    "MII attach failed, error: %d\n", error);
			ether_ifdetach(sc->ifp);
		sc->mii = device_get_softc(sc->miibus);
		sc->phy_attached = 1;

		/* Disable auto-negotiation in the MAC - rely on the PHY layer */
		mvneta_update_autoneg(sc, FALSE);
	} else if (sc->use_inband_status == TRUE) {
		/* In-band link status */
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,

		/* Configure media */
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);

		/* Enable auto-negotiation */
		mvneta_update_autoneg(sc, TRUE);

		if (MVNETA_IS_LINKUP(sc))
		mvneta_sc_unlock(sc);

		/* Fixed-link; use the predefined values */
		mvneta_update_autoneg(sc, FALSE);
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,

		ifm_target = IFM_ETHER;
		switch (sc->phy_speed) {
			if (sc->phy_mode != MVNETA_PHY_SGMII &&
			    sc->phy_mode != MVNETA_PHY_QSGMII) {
				    "2.5G speed can work only in (Q)SGMII mode\n");
				ether_ifdetach(sc->ifp);
			ifm_target |= IFM_2500_T;
			ifm_target |= IFM_1000_T;
			ifm_target |= IFM_100_TX;
			ifm_target |= IFM_10_T;
			ether_ifdetach(sc->ifp);

			ifm_target |= IFM_FDX;
			ifm_target |= IFM_HDX;

		ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
		if_link_state_change(sc->ifp, LINK_STATE_UP);

		if (mvneta_has_switch(self)) {
			device_printf(self, "This device is attached to a switch\n");
			child = device_add_child(sc->dev, "mdio", -1);
				ether_ifdetach(sc->ifp);
			bus_generic_attach(sc->dev);
			bus_generic_attach(child);

		/* Configure the MAC media */
		mvneta_update_media(sc, ifm_target);

	sysctl_mvneta_init(sc);

	callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);

	error = bus_setup_intr(self, sc->res[1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
		device_printf(self, "could not setup %s\n",
		    mvneta_intrs[0].description);
		ether_ifdetach(sc->ifp);
mvneta_detach(device_t dev)
	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	/* Detach the network interface */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
		mvneta_ring_dealloc_rx_queue(sc, q);
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
		mvneta_ring_dealloc_tx_queue(sc, q);

	if (sc->tx_dtag != NULL)
		bus_dma_tag_destroy(sc->tx_dtag);
	if (sc->rx_dtag != NULL)
		bus_dma_tag_destroy(sc->rx_dtag);
	if (sc->txmbuf_dtag != NULL)
		bus_dma_tag_destroy(sc->txmbuf_dtag);
	if (sc->rxbuf_dtag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_dtag);

	bus_release_resources(dev, res_spec, sc->res);
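/*
 * Editor's summary, derived from the code below: the SMI (MDIO) helpers
 * follow the usual Marvell handshake - poll MVNETA_SMI until the BUSY bit
 * clears, write a command word carrying the PHY address, register address
 * and opcode, then poll again until the operation completes (for reads,
 * until READVALID is set) before extracting the data from the low 16 bits.
 */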
mvneta_miibus_readreg(device_t dev, int phy, int reg)
	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);

	smi = MVNETA_SMI_PHYAD(phy) |
	    MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)

	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		smi = MVNETA_READ(sc, MVNETA_SMI);
		if (smi & MVNETA_SMI_READVALID)

	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);

	mtx_unlock(&mii_mutex);

	CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,

	val = smi & MVNETA_SMI_DATA_MASK;

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
	struct mvneta_softc *sc;

	sc = device_get_softc(dev);

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);

	smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
	    MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)

	mtx_unlock(&mii_mutex);

	if (i == MVNETA_PHY_TIMEOUT)
		if_printf(ifp, "phy write timed out\n");
mvneta_portup(struct mvneta_softc *sc)
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_enable(sc->ifp, q);
		mvneta_rx_unlockq(sc, q);

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		mvneta_tx_queue_enable(sc->ifp, q);
		mvneta_tx_unlockq(sc, q);

mvneta_portdown(struct mvneta_softc *sc)
	struct mvneta_rx_ring *rx;
	struct mvneta_tx_ring *tx;

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		mvneta_rx_lockq(sc, q);
		rx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_rx_unlockq(sc, q);

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		tx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_tx_unlockq(sc, q);

	/* Wait for all Rx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg = MVNETA_RQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);
		if (cnt >= RX_DISABLE_TIMEOUT) {
			    "timeout waiting for RX to stop. rqc 0x%x\n", reg);
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while ((reg & MVNETA_RQC_EN_MASK) != 0);

	/* Wait for all Tx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);

	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
	reg = MVNETA_TQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_TQC, reg);
		if (cnt >= TX_DISABLE_TIMEOUT) {
			    "timeout waiting for TX to stop. tqc 0x%x\n", reg);
		reg = MVNETA_READ(sc, MVNETA_TQC);
	} while ((reg & MVNETA_TQC_EN_MASK) != 0);

	/* Wait until the Tx FIFO is empty. */
		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
			    "timeout waiting for the TX FIFO to drain. ps0 0x%x\n", reg);
		reg = MVNETA_READ(sc, MVNETA_PS0);
	} while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
	    ((reg & MVNETA_PS0_TXINPROG) != 0));
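/*
 * Editor's note on the shutdown ordering above: the queues are first
 * marked disabled under their own locks, then the RQC/TQC registers are
 * told to stop, and finally the code polls until the hardware reports the
 * Tx FIFO empty with no transmission in progress, each wait bounded by a
 * timeout.
 */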
/*
 * Device Register Initialization:
 * reset the device registers to the driver's default values.
 * The device is not enabled here.
 */
mvneta_initreg(struct ifnet *ifp)
	struct mvneta_softc *sc;

	CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);

	/* Disable Legacy WRR, Disable EJP, Release from reset. */
	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
	/* Enable mbus retry. */
	MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);

	/* Init the TX/RX queue registers */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		if (mvneta_rx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_rx_unlockq(sc, q);
		mvneta_rx_unlockq(sc, q);
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		if (mvneta_tx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_tx_unlockq(sc, q);
		mvneta_tx_unlockq(sc, q);

	/*
	 * Ethernet Unit Control - disable automatic PHY management by HW.
	 * In case the port uses an SMI-controlled PHY, poll its status with
	 * mii_tick() and update the MAC settings accordingly.
	 */
	reg = MVNETA_READ(sc, MVNETA_EUC);
	reg &= ~MVNETA_EUC_POLLING;
	MVNETA_WRITE(sc, MVNETA_EUC, reg);

	/* EEE: Low Power Idle */
	reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
	reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
	MVNETA_WRITE(sc, MVNETA_LPIC0, reg);

	reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);

	reg = MVNETA_LPIC2_MUSTSET;
	MVNETA_WRITE(sc, MVNETA_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVNETA_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVNETA_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(ifp->if_mtu + MVNETA_ETHER_SIZE);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Port MAC Control set 2 */
	reg = MVNETA_READ(sc, MVNETA_PMACC2);
	switch (sc->phy_mode) {
	case MVNETA_PHY_QSGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
	case MVNETA_PHY_SGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
	case MVNETA_PHY_RGMII:
	case MVNETA_PHY_RGMII_ID:
		reg |= MVNETA_PMACC2_RGMIIEN;

	reg |= MVNETA_PMACC2_MUSTSET;
	reg &= ~MVNETA_PMACC2_PORTMACRESET;
	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVNETA_READ(sc, MVNETA_PXCX);
	reg &= ~MVNETA_PXCX_TXCRCDIS;
	MVNETA_WRITE(sc, MVNETA_PXCX, reg);

	/* Clear the MIB counter registers (cleared by read) */
	for (i = 0; i < nitems(mvneta_mib_list); i++) {
		if (mvneta_mib_list[i].reg64)
			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
	MVNETA_READ(sc, MVNETA_PDFC);
	MVNETA_READ(sc, MVNETA_POFC);

	/* Set the SDC register except the IPGINT bits */
	reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_BLMR;
	reg |= MVNETA_SDC_BLMT;
	MVNETA_WRITE(sc, MVNETA_SDC, reg);
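/*
 * Editor's note on the callback below: the descriptor rings are allocated
 * from tags created with a single segment, so bus_dmamap_load() hands the
 * callback exactly one segment and its bus address is simply copied back
 * through the opaque argument.
 */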
mvneta_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	*(bus_addr_t *)arg = segs->ds_addr;

mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);
	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
	/* Allocate DMA memory for the Rx descriptors */
	error = bus_dmamem_alloc(sc->rx_dtag,
	    (void **)&(rx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	if (error != 0 || rx->desc == NULL)
	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
			device_printf(sc->dev,
			    "Failed to create DMA map for Rx buffer num: %d\n", i);
		rxbuf = &rx->rxbuf[i];

	mvneta_ring_dealloc_rx_queue(sc, q);
	device_printf(sc->dev, "DMA ring buffer allocation failure.\n");
mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;

	if (q >= MVNETA_TX_QNUM_MAX)
	tx = MVNETA_TX_RING(sc, q);
	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
	error = bus_dmamem_alloc(sc->tx_dtag,
	    (void **)&(tx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	if (error != 0 || tx->desc == NULL)
	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);

#ifdef MVNETA_MULTIQUEUE
	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
	if (tx->br == NULL) {
		device_printf(sc->dev,
		    "Could not setup buffer ring for TxQ(%d)\n", q);

	mvneta_ring_dealloc_tx_queue(sc, q);
	device_printf(sc->dev, "DMA ring buffer allocation failure.\n");
mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	if (q >= MVNETA_TX_QNUM_MAX)
	tx = MVNETA_TX_RING(sc, q);

	if (tx->taskq != NULL) {
		while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
			taskqueue_drain(tx->taskq, &tx->task);
#ifdef MVNETA_MULTIQUEUE
		drbr_free(tx->br, M_DEVBUF);

	if (sc->txmbuf_dtag != NULL) {
		if (mtx_name(&tx->ring_mtx) != NULL) {
			/*
			 * It is assumed that maps are loaded only after the
			 * mutex has been initialized, so unloading the maps
			 * can be skipped when the mutex was never set up.
			 */
			mvneta_tx_lockq(sc, q);
			mvneta_ring_flush_tx_queue(sc, q);
			mvneta_tx_unlockq(sc, q);
		for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
			txbuf = &tx->txbuf[i];
			if (txbuf->dmap != NULL) {
				error = bus_dmamap_destroy(sc->txmbuf_dtag,
					panic("%s: map busy for Tx descriptor (Q%d, %d)",

	if (tx->desc_pa != 0)
		bus_dmamap_unload(sc->tx_dtag, tx->desc_map);

	kva = (void *)tx->desc;
		bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);

	if (mtx_name(&tx->ring_mtx) != NULL)
		mtx_destroy(&tx->ring_mtx);

	memset(tx, 0, sizeof(*tx));

mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);

	mvneta_ring_flush_rx_queue(sc, q);

	if (rx->desc_pa != 0)
		bus_dmamap_unload(sc->rx_dtag, rx->desc_map);

	kva = (void *)rx->desc;
		bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);

	if (mtx_name(&rx->ring_mtx) != NULL)
		mtx_destroy(&rx->ring_mtx);

	memset(rx, 0, sizeof(*rx));
mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;

	if (q >= MVNETA_RX_QNUM_MAX)

	rx = MVNETA_RX_RING(sc, q);
	rx->dma = rx->cpu = 0;
	rx->queue_th_received = MVNETA_RXTH_COUNT;
	rx->queue_th_time = (mvneta_get_clk() / 1000) / 10;	/* 0.1 [ms] */

	/* Initialize LRO */
	rx->lro_enabled = FALSE;
	if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
		error = tcp_lro_init(lro);
			device_printf(sc->dev, "LRO initialization failed!\n");
			rx->lro_enabled = TRUE;

mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	if (q >= MVNETA_TX_QNUM_MAX)

	tx = MVNETA_TX_RING(sc, q);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];

		/* The Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
		error = bus_dmamap_create(sc->txmbuf_dtag, 0,
			device_printf(sc->dev,
			    "can't create dma map (tx ring %d)\n", i);

	tx->dma = tx->cpu = 0;

	tx->queue_status = MVNETA_QUEUE_DISABLED;
	tx->queue_hung = FALSE;

	TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
	tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &tx->taskq);
	taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
	    device_get_nameunit(sc->dev), q);

mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;

	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		if (txbuf->m != NULL) {

	tx->dma = tx->cpu = 0;

mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;

	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxbuf = &rx->rxbuf[i];
		mvneta_rx_buf_free(sc, rxbuf);

	rx->dma = rx->cpu = 0;
/*
 * Rx/Tx Queue Control
 */
mvneta_rx_queue_init(struct ifnet *ifp, int q)
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;

	KASSERT_RX_MTX(sc, q);
	rx = MVNETA_RX_RING(sc, q);
	DASSERT(rx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);

	/* Rx buffer size and descriptor ring size */
	reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXC(q)));

	/* if DMA is not working, the register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);

mvneta_tx_queue_init(struct ifnet *ifp, int q)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);

	/* descriptor ring size */
	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);

	/* if DMA is not working, the register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);

mvneta_rx_queue_enable(struct ifnet *ifp, int q)
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;

	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	/* Set the Rx interrupt threshold */
	reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);

	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_RBICTAPQ(q);	/* Rx Buffer Interrupt Coalesce */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	/* Enable the Rx queue */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg |= MVNETA_RQC_ENQ(q);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);

	rx->queue_status = MVNETA_QUEUE_WORKING;

mvneta_tx_queue_enable(struct ifnet *ifp, int q)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Enable the Tx queue */
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));

	tx->queue_status = MVNETA_QUEUE_IDLE;
	tx->queue_hung = FALSE;
STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_lock(&sc->rx_ring[q].ring_mtx);

STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_unlock(&sc->rx_ring[q].ring_mtx);

STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));

STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_lock(&sc->tx_ring[q].ring_mtx);

STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_unlock(&sc->tx_ring[q].ring_mtx);
/*
 * Interrupt Handlers
 */
mvneta_disable_intr(struct mvneta_softc *sc)
	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
	MVNETA_WRITE(sc, MVNETA_PIE, 0);

mvneta_enable_intr(struct mvneta_softc *sc)
	/* Enable the summary bit to check all interrupt causes. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	if (sc->use_inband_status) {
		/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
		MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);

	/* Enable all queue interrupts */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);
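/*
 * Editor's summary of the masks programmed above: RXTX_TH is the single
 * aggregated interrupt the driver services, and PMISCICSUMMARY folds the
 * miscellaneous port causes (PHY status, link change, PSC sync) into that
 * same handler when in-band link status is in use.
 */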
mvneta_rxtxth_intr(void *arg)
	struct mvneta_softc *sc;
	uint32_t ic, queues;

	CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);

	/* Ack the maintenance interrupt first */
	if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
	    sc->use_inband_status)) {
		mvneta_misc_intr(sc);
		mvneta_sc_unlock(sc);
	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))

	/* RxTxTH interrupt */
	queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
	if (__predict_true(queues)) {
		CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
		/* At the moment the driver supports only one Rx queue. */
		DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
		mvneta_rx(sc, 0, 0);

mvneta_misc_intr(struct mvneta_softc *sc)
	CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);

	ic = MVNETA_READ(sc, MVNETA_PMIC);
	ic &= MVNETA_READ(sc, MVNETA_PMIM);
	MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);

	if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
		mvneta_link_isr(sc);
mvneta_tick(void *arg)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct mvneta_rx_ring *rx;
	uint32_t fc_prev, fc_curr;

	/*
	 * This is done before the MIB update to get the right stats.
	 */
	mvneta_tx_drain(sc);

	/* Extract the previous flow-control frame received counter. */
	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
	/* Read the MIB registers (cleared by read). */
	mvneta_update_mib(sc);
	/* Extract the current flow-control frame received counter. */
	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;

	if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
		/* Adjust the MAC settings */
		mvneta_adjust_link(sc);
		mvneta_sc_unlock(sc);

	/*
	 * If we were unable to refill the Rx queue and left the Rx path,
	 * the ring is left without mbufs and with no way to call the
	 * refill function.
	 */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		if (rx->needs_refill == TRUE) {
			mvneta_rx_lockq(sc, q);
			mvneta_rx_queue_refill(sc, q);
			mvneta_rx_unlockq(sc, q);

	/*
	 * - check if the queue is marked as hung.
	 * - ignore the hung status if we received a pause frame,
	 *   as the hardware may have paused packet transmission.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		/*
		 * We should take the queue lock, but as we only read the
		 * queue status we can do it without the lock; at worst we
		 * misdetect the queue status for one tick.
		 */
		tx = MVNETA_TX_RING(sc, q);

		if (tx->queue_hung && (fc_curr - fc_prev) == 0)

	callout_schedule(&sc->tick_ch, hz);

	if_printf(sc->ifp, "watchdog timeout\n");

	sc->counter_watchdog++;
	sc->counter_watchdog_mib++;
	/* Trigger the reinitialization sequence. */
	mvneta_stop_locked(sc);
	mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);
mvneta_qflush(struct ifnet *ifp)
#ifdef MVNETA_MULTIQUEUE
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
		mvneta_tx_unlockq(sc, q);

mvneta_tx_task(void *arg, int pending)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	mvneta_tx_lockq(sc, tx->qidx);
	error = mvneta_xmit_locked(sc, tx->qidx);
	mvneta_tx_unlockq(sc, tx->qidx);

	if (__predict_false(error != 0 && error != ENETDOWN)) {
		pause("mvneta_tx_task_sleep", 1);
		taskqueue_enqueue(tx->taskq, &tx->task);
mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);

	/* Don't enqueue the packet if the queue is disabled. */
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {

	/* Reclaim mbufs if above the threshold. */
	if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
		mvneta_tx_queue_complete(sc, q);

	/* Do not call the transmit path if the queue is already too full. */
	if (__predict_false(tx->used >
	    MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))

	error = mvneta_tx_queue(sc, m, q);
	if (__predict_false(error != 0))

	/* Send a copy of the frame to the BPF listener. */
	ETHER_BPF_MTAP(ifp, *m);

	/* Set the watchdog on. */
	tx->watchdog_time = ticks;
	tx->queue_status = MVNETA_QUEUE_WORKING;
#ifdef MVNETA_MULTIQUEUE
mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	/* Use the default queue if there is no flow ID, as threads can migrate. */
	if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
		q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;

	tx = MVNETA_TX_RING(sc, q);

	/* If the buf_ring is full, start transmitting immediately. */
	if (buf_ring_full(tx->br)) {
		mvneta_tx_lockq(sc, q);
		mvneta_xmit_locked(sc, q);
		mvneta_tx_unlockq(sc, q);

	/*
	 * If the buf_ring is empty we will not reorder packets.
	 * If the lock is available, transmit without using the buf_ring.
	 */
	if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
		error = mvneta_xmitfast_locked(sc, q, &m);
		mvneta_tx_unlockq(sc, q);
		if (__predict_true(error == 0))

		/* Transmit can fail in the fast path. */
		if (__predict_false(m == NULL))

	/* Enqueue, then schedule the taskqueue. */
	error = drbr_enqueue(ifp, tx->br, m);
	if (__predict_false(error != 0))

	taskqueue_enqueue(tx->taskq, &tx->task);
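/*
 * Editor's summary of the paths above: a packet leaves through one of
 * three routes - directly via mvneta_xmitfast_locked() when the buf_ring
 * is empty and the queue lock is uncontended, by draining the buf_ring
 * inline when it fills up, or by deferring to the per-queue taskqueue
 * when the lock is busy or a transient error occurred.
 */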
mvneta_xmit_locked(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);

	while ((m = drbr_peek(ifp, tx->br)) != NULL) {
		error = mvneta_xmitfast_locked(sc, q, &m);
		if (__predict_false(error != 0)) {
				drbr_putback(ifp, tx->br, m);
				drbr_advance(ifp, tx->br);
		drbr_advance(ifp, tx->br);

#else /* !MVNETA_MULTIQUEUE */
mvneta_start(struct ifnet *ifp)
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	tx = MVNETA_TX_RING(sc, 0);

	mvneta_tx_lockq(sc, 0);
	error = mvneta_xmit_locked(sc, 0);
	mvneta_tx_unlockq(sc, 0);
	/* Handle retransmit in the background taskqueue. */
	if (__predict_false(error != 0 && error != ENETDOWN))
		taskqueue_enqueue(tx->taskq, &tx->task);

mvneta_xmit_locked(struct mvneta_softc *sc, int q)
	struct mvneta_tx_ring *tx;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, 0);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

		error = mvneta_xmitfast_locked(sc, q, &m);
		if (__predict_false(error != 0)) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;

	ifr = (struct ifreq *)data;

	if (ifp->if_flags & IFF_UP) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			flags = ifp->if_flags ^ sc->mvneta_if_flags;

			sc->mvneta_if_flags = ifp->if_flags;
			if ((flags & IFF_PROMISC) != 0)
				mvneta_filter_setup(sc);
			mvneta_init_locked(sc);
			sc->mvneta_if_flags = ifp->if_flags;
			if (sc->phy_attached)
				mii_mediachg(sc->mii);
			mvneta_sc_unlock(sc);
	} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		mvneta_stop_locked(sc);

	sc->mvneta_if_flags = ifp->if_flags;
	mvneta_sc_unlock(sc);

	if (ifp->if_mtu > sc->tx_csum_limit &&
	    ifr->ifr_reqcap & IFCAP_TXCSUM)
		ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
	mask = ifp->if_capenable ^ ifr->ifr_reqcap;
	if (mask & IFCAP_HWCSUM) {
		ifp->if_capenable &= ~IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = CSUM_IP | CSUM_TCP |
			ifp->if_hwassist = 0;
	if (mask & IFCAP_LRO) {
		ifp->if_capenable ^= IFCAP_LRO;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
				rx = MVNETA_RX_RING(sc, q);
				rx->lro_enabled = !rx->lro_enabled;
	mvneta_sc_unlock(sc);

	VLAN_CAPABILITIES(ifp);

	if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
	    IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
	    (ifr->ifr_media & IFM_FDX) == 0) {
		device_printf(sc->dev,
		    "%s half-duplex unsupported\n",
		    IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?

case SIOCGIFMEDIA:	/* FALLTHROUGH */
	if (!sc->phy_attached)
		error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,

	if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
	    MVNETA_ETHER_SIZE) {
	ifp->if_mtu = ifr->ifr_mtu;

	if (ifp->if_mtu + MVNETA_ETHER_SIZE <= MCLBYTES) {
		sc->rx_frame_size = MCLBYTES;
		sc->rx_frame_size = MJUM9BYTES;

	if (ifp->if_mtu > sc->tx_csum_limit) {
		ifp->if_capenable &= ~IFCAP_TXCSUM;
		ifp->if_hwassist = 0;
		ifp->if_capenable |= IFCAP_TXCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP |

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mvneta_stop_locked(sc);
		/*
		 * Reinitialize the Rx queues.
		 * We need to update the Rx descriptor size.
		 */
		for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
			mvneta_rx_lockq(sc, q);
			if (mvneta_rx_queue_init(ifp, q) != 0) {
				device_printf(sc->dev,
				    "initialization failed:"
				    " cannot initialize queue\n");
				mvneta_rx_unlockq(sc, q);
			mvneta_rx_unlockq(sc, q);
		/* Trigger reinitialization */
		mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);

	error = ether_ioctl(ifp, cmd, data);
mvneta_init_locked(void *arg)
	struct mvneta_softc *sc;

	if (!device_is_attached(sc->dev) ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)

	mvneta_disable_intr(sc);
	callout_stop(&sc->tick_ch);

	/* Get the latest MAC address */
	bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
	mvneta_set_mac_address(sc, sc->enaddr);
	mvneta_filter_setup(sc);

	/* Start the DMA engines */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);

	reg = MVNETA_READ(sc, MVNETA_PMACC0);
	reg |= MVNETA_PMACC0_PORTEN;
	reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(ifp->if_mtu + MVNETA_ETHER_SIZE);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Allow access to each TXQ/RXQ from both CPUs */
	for (cpu = 0; cpu < mp_ncpus; ++cpu)
		MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
		    MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_refill(sc, q);
		mvneta_rx_unlockq(sc, q);

	if (!sc->phy_attached)

	/* Enable interrupts */
	mvneta_enable_intr(sc);

	callout_schedule(&sc->tick_ch, hz);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

mvneta_init(void *arg)
	struct mvneta_softc *sc;

	mvneta_init_locked(sc);
	if (sc->phy_attached)
		mii_mediachg(sc->mii);
	mvneta_sc_unlock(sc);

mvneta_stop_locked(struct mvneta_softc *sc)
	struct mvneta_rx_ring *rx;
	struct mvneta_tx_ring *tx;

	if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)

	mvneta_disable_intr(sc);

	callout_stop(&sc->tick_ch);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (sc->linkup == TRUE)
		mvneta_linkdown(sc);

	/* Reset the MAC Port Enable bit */
	reg = MVNETA_READ(sc, MVNETA_PMACC0);
	reg &= ~MVNETA_PMACC0_PORTEN;
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Disable each queue */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);

		mvneta_rx_lockq(sc, q);
		mvneta_ring_flush_rx_queue(sc, q);
		mvneta_rx_unlockq(sc, q);

	/*
	 * Hold the reset state of the DMA engine
	 * (must write 0x0 to restart it).
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);

		mvneta_tx_lockq(sc, q);
		mvneta_ring_flush_tx_queue(sc, q);
		mvneta_tx_unlockq(sc, q);

mvneta_stop(struct mvneta_softc *sc)
	mvneta_stop_locked(sc);
	mvneta_sc_unlock(sc);
mvneta_mediachange(struct ifnet *ifp)
	struct mvneta_softc *sc;

	if (!sc->phy_attached && !sc->use_inband_status) {
		/* We shouldn't be here */
		if_printf(ifp, "Cannot change media in fixed-link mode!\n");

	if (sc->use_inband_status) {
		mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);

	mii_mediachg(sc->mii);

	mvneta_sc_unlock(sc);

mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
	psr = MVNETA_READ(sc, MVNETA_PSR);

	if (psr & MVNETA_PSR_GMIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
	else if (psr & MVNETA_PSR_MIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
	else if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);

	if (psr & MVNETA_PSR_FULLDX)
		ifmr->ifm_active |= IFM_FDX;

	ifmr->ifm_status = IFM_AVALID;
	if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_status |= IFM_ACTIVE;

mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct mvneta_softc *sc;
	struct mii_data *mii;

	if (!sc->phy_attached && !sc->use_inband_status) {
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	if (sc->use_inband_status) {
		mvneta_get_media(sc, ifmr);
		mvneta_sc_unlock(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	mvneta_sc_unlock(sc);
mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
	reg = MVNETA_READ(sc, MVNETA_PANC);
	reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
	    MVNETA_PANC_ANFCEN);
	reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
	    MVNETA_PANC_INBANDANEN;
	MVNETA_WRITE(sc, MVNETA_PANC, reg);

	reg = MVNETA_READ(sc, MVNETA_PMACC2);
	reg |= MVNETA_PMACC2_INBANDANMODE;
	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

	reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
	reg |= MVNETA_PSOMSCD_ENABLE;
	MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);

	reg = MVNETA_READ(sc, MVNETA_PANC);
	reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
	    MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
	    MVNETA_PANC_INBANDANEN);
	MVNETA_WRITE(sc, MVNETA_PANC, reg);

	reg = MVNETA_READ(sc, MVNETA_PMACC2);
	reg &= ~MVNETA_PMACC2_INBANDANMODE;
	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

	reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
	reg &= ~MVNETA_PSOMSCD_ENABLE;
	MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
STATIC int
mvneta_update_media(struct mvneta_softc *sc, int media)
{
	boolean_t running;
	int reg, err;

	err = 0;

	mvneta_sc_lock(sc);

	mvneta_linkreset(sc);

	running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	if (running)
		mvneta_stop_locked(sc);

	sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);

	if (sc->use_inband_status)
		mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);

	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	if (IFM_SUBTYPE(media) != IFM_AUTO) {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(media) == IFM_1000_T ||
		    IFM_SUBTYPE(media) == IFM_2500_T) {
			if ((media & IFM_FDX) == 0) {
				device_printf(sc->dev,
				    "%s half-duplex unsupported\n",
				    IFM_SUBTYPE(media) == IFM_1000_T ?
				    "1000Base-T" :
				    "2500Base-T");
				err = EINVAL;
				goto out;
			}
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
out:
	if (running)
		mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);
	return (err);
}
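/*
 * mvneta_adjust_link() mirrors the PHY-reported state into the MAC.
 * It is presumably driven from the MII status-change path when an
 * external PHY is attached; the in-band/fixed-link cases are handled
 * by mvneta_update_media() above instead.
 */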
STATIC void
mvneta_adjust_link(struct mvneta_softc *sc)
{
	boolean_t phy_linkup;
	int reg;

	/* Update eee/fc */
	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	/* Check for link change */
	phy_linkup = (sc->mii->mii_media_status &
	    (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);

	if (sc->linkup != phy_linkup)
		mvneta_linkupdate(sc, phy_linkup);

	/* Don't update media on disabled link */
	if (!phy_linkup)
		return;

	/* Check for media type change */
	if (sc->mvneta_media != sc->mii->mii_media_active) {
		sc->mvneta_media = sc->mii->mii_media_active;

		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
		    IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (sc->mvneta_media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
}
STATIC void
mvneta_link_isr(struct mvneta_softc *sc)
{
	int linkup;

	KASSERT_SC_MTX(sc);

	linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
	if (sc->linkup == linkup)
		return;

	if (linkup == TRUE)
		mvneta_linkup(sc);
	else
		mvneta_linkdown(sc);

#ifdef DEBUG
	device_printf(sc->dev,
	    "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
#endif
}

STATIC void
mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
{

	KASSERT_SC_MTX(sc);

	if (linkup == TRUE)
		mvneta_linkup(sc);
	else
		mvneta_linkdown(sc);

#ifdef DEBUG
	device_printf(sc->dev,
	    "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
#endif
}
STATIC void
mvneta_update_eee(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	/* set EEE parameters */
	reg = MVNETA_READ(sc, MVNETA_LPIC1);
	if (sc->cf_lpi)
		reg |= MVNETA_LPIC1_LPIRE;
	else
		reg &= ~MVNETA_LPIC1_LPIRE;
	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
}

STATIC void
mvneta_update_fc(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	reg = MVNETA_READ(sc, MVNETA_PANC);
	if (sc->cf_fc) {
		/* Flow control negotiation */
		reg |= MVNETA_PANC_PAUSEADV;
		reg |= MVNETA_PANC_ANFCEN;
	} else {
		/* Disable flow control negotiation */
		reg &= ~MVNETA_PANC_PAUSEADV;
		reg &= ~MVNETA_PANC_ANFCEN;
	}

	MVNETA_WRITE(sc, MVNETA_PANC, reg);
}
STATIC void
mvneta_linkup(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	if (!sc->use_inband_status) {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg |= MVNETA_PANC_FORCELINKPASS;
		reg &= ~MVNETA_PANC_FORCELINKFAIL;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}

	mvneta_qflush(sc->ifp);
	mvneta_portup(sc);
	sc->linkup = TRUE;
	if_link_state_change(sc->ifp, LINK_STATE_UP);
}

STATIC void
mvneta_linkdown(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	if (!sc->use_inband_status) {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~MVNETA_PANC_FORCELINKPASS;
		reg |= MVNETA_PANC_FORCELINKFAIL;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}

	mvneta_portdown(sc);
	mvneta_qflush(sc->ifp);
	sc->linkup = FALSE;
	if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}

STATIC void
mvneta_linkreset(struct mvneta_softc *sc)
{
	struct mii_softc *mii;

	if (sc->phy_attached) {
		/* Force reset PHY */
		mii = LIST_FIRST(&sc->mii->mii_phys);
		if (mii)
			mii_phy_reset(mii);
	}
}

/*
 * Tx Subroutines
 */
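/*
 * A sketch of the enqueue path in mvneta_tx_queue() below: the mbuf
 * chain is loaded on the DMA map of its first descriptor slot, one
 * descriptor is filled per DMA segment (the first carries the checksum
 * command bits, the last the L/PADDING bits), and the number of new
 * descriptors is reported to the hardware through PTXSU in chunks of
 * at most 255 per write.
 */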
STATIC int
mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
{
	struct ifnet *ifp;
	bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
	struct mbuf *mtmp, *mbuf;
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	struct mvneta_tx_desc *t;
	uint32_t ptxsu;
	int start, used, error, i, txnsegs;

	mbuf = *mbufp;
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
	t = NULL;
	ifp = sc->ifp;

	if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
		mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
		if (mbuf == NULL) {
			tx->drv_error++;
			*mbufp = NULL;
			return (ENOBUFS);
		}
		mbuf->m_flags &= ~M_VLANTAG;
		*mbufp = mbuf;
	}

	if (__predict_false(mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL) {
				tx->drv_error++;
				*mbufp = NULL;
				return (ENOBUFS);
			}
			*mbufp = mbuf = mtmp;
		}
	}

	/* load mbuf using dmamap of 1st descriptor */
	txbuf = &tx->txbuf[tx->cpu];
	error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
	    txbuf->dmap, mbuf, txsegs, &txnsegs,
	    BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
#endif
		/* This is the only recoverable error (except EFBIG). */
		if (error != ENOMEM) {
			tx->drv_error++;
			m_freem(mbuf);
			*mbufp = NULL;
			return (ENOBUFS);
		}
		return (error);
	}

	if (__predict_false(txnsegs <= 0
	    || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
		/* not enough descriptors, or the mbuf is broken */
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
		    ifp->if_xname, q, txnsegs);
#endif
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		return (ENOBUFS);
	}
	DASSERT(txbuf->m == NULL);

	/* remember mbuf using 1st descriptor */
	txbuf->m = mbuf;
	bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* load to tx descriptors */
	start = tx->cpu;
	used = 0;
	for (i = 0; i < txnsegs; i++) {
		t = &tx->desc[tx->cpu];
		t->command = 0;
		t->l4ichk = 0;
		t->flags = 0;
		if (__predict_true(i == 0)) {
			/* 1st descriptor */
			t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
			t->command |= MVNETA_TX_CMD_F;
			mvneta_tx_set_csumflag(ifp, t, mbuf);
		}
		t->bufptr_pa = txsegs[i].ds_addr;
		t->bytecnt = txsegs[i].ds_len;
		tx->cpu = tx_counter_adv(tx->cpu, 1);

		tx->used++;
		used++;
	}
	/* t is last descriptor here */
	DASSERT(t != NULL);
	t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;

	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	while (__predict_false(used > 255)) {
		ptxsu = MVNETA_PTXSU_NOWD(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		used -= 255;
	}
	if (__predict_true(used > 0)) {
		ptxsu = MVNETA_PTXSU_NOWD(used);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
	return (0);
}
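/*
 * The helper below translates host checksum-offload requests
 * (CSUM_IP, CSUM_IP_TCP, CSUM_IP_UDP) into descriptor command bits:
 * the L3 offset and IPv4 header length tell the engine where the
 * headers sit, and the L4 mode selects TCP, UDP or no checksum
 * generation.  Non-IP frames fall through with checksumming disabled.
 */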
STATIC void
mvneta_tx_set_csumflag(struct ifnet *ifp,
    struct mvneta_tx_desc *t, struct mbuf *m)
{
	struct ether_header *eh;
	int csum_flags;
	uint32_t iphl, ipoff;
	struct ip *ip;

	iphl = ipoff = 0;
	csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
	eh = mtod(m, struct ether_header *);

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		ipoff = ETHER_HDR_LEN;
		break;
	case ETHERTYPE_VLAN:
		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;
	default:
		csum_flags = 0;
	}

	if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
		ip = (struct ip *)(m->m_data + ipoff);
		iphl = ip->ip_hl<<2;
		t->command |= MVNETA_TX_CMD_L3_IP4;
	} else {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
		return;
	}

	/* L3 */
	if (csum_flags & CSUM_IP) {
		t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
	}

	/* L4 */
	if (csum_flags & CSUM_IP_TCP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_TCP;
	} else if (csum_flags & CSUM_IP_UDP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_UDP;
	} else
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;

	t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
	t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
}
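/*
 * mvneta_tx_queue_complete() below reclaims descriptors the hardware
 * has finished with, as counted by the transmitted-buffer counter in
 * PTXS: completed mbufs are unloaded and freed, the ring's dma index
 * advances, and the count is acknowledged back through PTXSU(NORB).
 */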
STATIC void
mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	struct mvneta_tx_desc *t;
	uint32_t ptxs, ptxsu, ndesc;
	int i;

	KASSERT_TX_MTX(sc, q);

	tx = MVNETA_TX_RING(sc, q);
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
	ndesc = MVNETA_PTXS_GET_TBC(ptxs);

	if (__predict_false(ndesc == 0)) {
		if (tx->used == 0)
			tx->queue_status = MVNETA_QUEUE_IDLE;
		else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
		    ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
			tx->queue_hung = TRUE;
		return;
	}

#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
	    sc->ifp->if_xname, q, ndesc);
#endif

	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < ndesc; i++) {
		t = &tx->desc[tx->dma];
#ifdef MVNETA_KTR
		if (t->flags & MVNETA_TX_F_ES)
			CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
			    sc->ifp->if_xname, q, tx->dma);
#endif
		txbuf = &tx->txbuf[tx->dma];
		if (__predict_true(txbuf->m != NULL)) {
			DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
			bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
			m_freem(txbuf->m);
			txbuf->m = NULL;
		} else
			DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
		tx->dma = tx_counter_adv(tx->dma, 1);
		tx->used--;
	}
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
	while (__predict_false(ndesc > 255)) {
		ptxsu = MVNETA_PTXSU_NORB(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		ndesc -= 255;
	}
	if (__predict_true(ndesc > 0)) {
		ptxsu = MVNETA_PTXSU_NORB(ndesc);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
#ifdef MVNETA_KTR
	CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
	    sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
#endif

	tx->watchdog_time = ticks;

	if (tx->used == 0)
		tx->queue_status = MVNETA_QUEUE_IDLE;
}
/*
 * Do a final TX complete when TX is idle.
 */
STATIC void
mvneta_tx_drain(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int q;

	/*
	 * Handle trailing mbuf on TX queue.
	 * The check is done locklessly to avoid TX path contention.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
		    tx->used > 0) {
			mvneta_tx_lockq(sc, q);
			mvneta_tx_queue_complete(sc, q);
			mvneta_tx_unlockq(sc, q);
		}
	}
}

/*
 * Rx Subroutines
 */
STATIC void
mvneta_rx(struct mvneta_softc *sc, int q, int count)
{
	uint32_t prxs, npkt;

	mvneta_rx_lockq(sc, q);
	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
	npkt = MVNETA_PRXS_GET_ODC(prxs);
	if (__predict_false(npkt == 0))
		goto out;

	if (count > 0 && npkt > count)
		npkt = count;

	mvneta_rx_queue(sc, q, npkt);
out:
	mvneta_rx_unlockq(sc, q);
}
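/*
 * Worked example for the helper below: 600 processed descriptors must
 * be acknowledged as three PRXSU writes of 255, 255 and 90, because
 * the no-of-processed-descriptors field accepts at most 255 per write.
 */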
/*
 * Helper routine for updating the PRXSU register of a given queue.
 * Handles counts of processed descriptors bigger than the maximum
 * acceptable value.
 */
STATIC __inline void
mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
{
	uint32_t prxsu;

	while (__predict_false(processed > 255)) {
		prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
		processed -= 255;
	}
	prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
	MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
}

static __inline void
mvneta_prefetch(void *p)
{

	__builtin_prefetch(p);
}
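/*
 * Per-packet flow of the receive loop below: sync and unload the
 * buffer's DMA map, validate the descriptor (only single-buffer frames
 * are accepted), point the mbuf at the payload past the Marvell
 * hardware header, then hand it to LRO or if_input() with the queue
 * lock dropped.  Every MVNETA_RX_REFILL_COUNT packets the ring is
 * refilled early to keep the hardware from starving.
 */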
STATIC void
mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
{
	struct ifnet *ifp;
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	struct mbuf *m;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	uint8_t *pktbuf;
	int i, pktlen, processed, ndma;

	KASSERT_RX_MTX(sc, q);

	ifp = sc->ifp;
	rx = MVNETA_RX_RING(sc, q);
	processed = 0;

	if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < npkt; i++) {
		/* Prefetch next desc, rxbuf. */
		ndma = rx_counter_adv(rx->dma, 1);
		mvneta_prefetch(&rx->desc[ndma]);
		mvneta_prefetch(&rx->rxbuf[ndma]);

		/* get descriptor and packet */
		r = &rx->desc[rx->dma];
		rxbuf = &rx->rxbuf[rx->dma];
		m = rxbuf->m;
		rxbuf->m = NULL;
		DASSERT(m != NULL);
		bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
		/* Prefetch mbuf header. */
		mvneta_prefetch(m);

		processed++;
		/* Drop desc with error status or not in a single buffer. */
		DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
		    (MVNETA_RX_F|MVNETA_RX_L));
		if (__predict_false((r->status & MVNETA_RX_ES) ||
		    (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
		    (MVNETA_RX_F|MVNETA_RX_L)))
			goto rx_error;

		/*
		 * [ OFF | MH | PKT | CRC ]
		 * bytecnt covers MH, PKT and CRC
		 */
		pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
		pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
		    MVNETA_HWHEADER_SIZE;

		/* Prefetch mbuf data. */
		mvneta_prefetch(pktbuf);

		/* Write value to mbuf (avoid read). */
		m->m_data = pktbuf;
		m->m_len = m->m_pkthdr.len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		mvneta_rx_set_csumflag(ifp, r, m);

		/* Increase rx_dma before releasing the lock. */
		rx->dma = ndma;

		if (__predict_false(rx->lro_enabled &&
		    ((r->status & MVNETA_RX_L3_IP) != 0) &&
		    ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
		    (m->m_pkthdr.csum_flags &
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
			if (rx->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx->lro, m, 0) == 0)
					goto rx_done;
			}
		}

		mvneta_rx_unlockq(sc, q);
		(*ifp->if_input)(ifp, m);
		mvneta_rx_lockq(sc, q);
		/*
		 * Check whether this queue has been disabled in the
		 * meantime. If yes, then clear LRO and exit.
		 */
		if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
			goto rx_lro;
rx_done:
		/* Refresh receive ring to avoid stall and minimize jitter. */
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
		continue;
rx_error:
		m_freem(m);
		rx->dma = ndma;
		/* Refresh receive ring to avoid stall and minimize jitter. */
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
	}
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
#endif
	/* DMA status update */
	mvneta_prxsu_update(sc, q, processed);
	/* Refill the rest of buffers if there are any to refill */
	mvneta_rx_queue_refill(sc, q);

rx_lro:
	/*
	 * Flush any outstanding LRO work
	 */
	lro = &rx->lro;
	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
		LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
		tcp_lro_flush(lro, queued);
	}
}
STATIC void
mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
{

	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
	/* This will remove all data at once */
	m_freem(rxbuf->m);
}
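/*
 * The refill path below derives the number of empty slots from PRXS
 * (ring size minus occupied plus non-occupied descriptor counts),
 * attaches a fresh cluster of sc->rx_frame_size bytes to each, and
 * publishes the new descriptors through PRXSU.  needs_refill is
 * latched when allocation fails with the ring completely empty,
 * presumably so a later pass can retry the refill.
 */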
STATIC void
mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	bus_dma_segment_t segs;
	struct mbuf *m;
	uint32_t prxs, prxsu, ndesc;
	int npkt, refill, nsegs, error;

	KASSERT_RX_MTX(sc, q);

	rx = MVNETA_RX_RING(sc, q);
	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
	refill = MVNETA_RX_RING_CNT - ndesc;
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
	    refill);
#endif
	if (__predict_false(refill <= 0))
		return;

	for (npkt = 0; npkt < refill; npkt++) {
		rxbuf = &rx->rxbuf[rx->cpu];
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
		if (__predict_false(m == NULL)) {
			error = ENOBUFS;
			break;
		}
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
		    m, &segs, &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0 || nsegs != 1)) {
			KASSERT(0, ("Failed to load Rx mbuf DMA map"));
			m_freem(m);
			break;
		}

		/* Add the packet to the ring */
		rxbuf->m = m;
		r = &rx->desc[rx->cpu];
		r->bufptr_pa = segs.ds_addr;
		rx->rxbuf_virt_addr[rx->cpu] = m->m_data;

		rx->cpu = rx_counter_adv(rx->cpu, 1);
	}
	if (npkt == 0) {
		if (refill == MVNETA_RX_RING_CNT)
			rx->needs_refill = TRUE;
		return;
	}

	rx->needs_refill = FALSE;
	bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	while (__predict_false(npkt > 255)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
		npkt -= 255;
	}
	if (__predict_true(npkt > 0)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
	}
}
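/*
 * RX checksum-offload mapping implemented below: a verified IPv4
 * header yields CSUM_L3_CALC|CSUM_L3_VALID, and a good TCP/UDP
 * checksum additionally yields CSUM_L4_CALC|CSUM_L4_VALID with
 * csum_data set to 0xffff, i.e. a fully verified L4 checksum.
 */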
STATIC __inline void
mvneta_rx_set_csumflag(struct ifnet *ifp,
    struct mvneta_rx_desc *r, struct mbuf *m)
{
	uint32_t csum_flags;

	csum_flags = 0;
	if (__predict_false((r->status &
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
		return; /* not an IP packet */

	/* L3 */
	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
	    MVNETA_RX_IP_HEADER_OK))
		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;

	if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
		/* L4 */
		switch (r->status & MVNETA_RX_L4_MASK) {
		case MVNETA_RX_L4_TCP:
		case MVNETA_RX_L4_UDP:
			csum_flags |= CSUM_L4_CALC;
			if (__predict_true((r->status &
			    MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
				csum_flags |= CSUM_L4_VALID;
				m->m_pkthdr.csum_data = htons(0xffff);
			}
			break;
		case MVNETA_RX_L4_OTH:
		default:
			break;
		}
	}
	m->m_pkthdr.csum_flags = csum_flags;
}
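/*
 * A worked example of the unicast filter indexing used below: for a
 * station address ending in byte 0x35 the last nibble is 5, so the
 * PASS entry lands in word dfut[5 >> 2] == dfut[1], field 5 & 3 == 1.
 */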
/*
 * MAC address filter
 */
STATIC void
mvneta_filter_setup(struct mvneta_softc *sc)
{
	struct ifnet *ifp;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;

	KASSERT_SC_MTX(sc);

	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	ifp = sc->ifp;
	ifp->if_flags |= IFF_ALLMULTI;
	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	}

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
	if (ifp->if_flags & IFF_BROADCAST) {
		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
	}
	if (ifp->if_flags & IFF_PROMISC) {
		pxc |= MVNETA_PXC_UPM;
	}
	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	if (ifp->if_flags & IFF_PROMISC) {
		/* pass all unicast addresses */
		for (i = 0; i < MVNETA_NDFUT; i++) {
			dfut[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		i = sc->enaddr[5] & 0xf;	/* last nibble */
		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	}
	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
}
/*
 * sysctl(9)
 */
STATIC int
sysctl_read_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_sysctl_mib *arg;
	struct mvneta_softc *sc;
	uint64_t val;

	arg = (struct mvneta_sysctl_mib *)arg1;
	if (arg == NULL)
		return (EINVAL);

	sc = arg->sc;
	if (sc == NULL)
		return (EINVAL);
	if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
		return (EINVAL);

	mvneta_sc_lock(sc);
	val = arg->counter;
	mvneta_sc_unlock(sc);
	return sysctl_handle_64(oidp, &val, 0, req);
}

STATIC int
sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_softc *sc;
	int err, val;

	val = 0;
	sc = (struct mvneta_softc *)arg1;
	if (sc == NULL)
		return (EINVAL);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0)
		return (err);

	if (val < 0 || val > 1)
		return (EINVAL);

	if (val == 1) {
		mvneta_sc_lock(sc);
		mvneta_clear_mib(sc);
		mvneta_sc_unlock(sc);
	}

	return (0);
}
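/*
 * Handler for hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us.
 * The value is converted between microseconds and TCLK ticks; for
 * example, assuming a 250 MHz TCLK (A3700_TCLK_250MHZ), a threshold
 * of 100 us becomes 250000000 * 100 / 1000000 = 25000 ticks in
 * MVNETA_PRXITTH.
 */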
STATIC int
sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_sysctl_queue *arg;
	struct mvneta_rx_ring *rx;
	struct mvneta_softc *sc;
	uint32_t reg, time_mvtclk;
	int err, time_us;

	rx = NULL;
	arg = (struct mvneta_sysctl_queue *)arg1;
	if (arg == NULL)
		return (EINVAL);
	if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
		return (EINVAL);
	if (arg->rxtx != MVNETA_SYSCTL_RX)
		return (EINVAL);

	sc = arg->sc;
	if (sc == NULL)
		return (EINVAL);

	/* read queue length */
	mvneta_sc_lock(sc);
	mvneta_rx_lockq(sc, arg->queue);
	rx = MVNETA_RX_RING(sc, arg->queue);
	time_mvtclk = rx->queue_th_time;
	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvneta_get_clk();
	mvneta_rx_unlockq(sc, arg->queue);
	mvneta_sc_unlock(sc);

	err = sysctl_handle_int(oidp, &time_us, 0, req);
	if (err != 0)
		return (err);

	mvneta_sc_lock(sc);
	mvneta_rx_lockq(sc, arg->queue);

	/* update queue length (0[sec] - 1[sec]) */
	if (time_us < 0 || time_us > (1000 * 1000)) {
		mvneta_rx_unlockq(sc, arg->queue);
		mvneta_sc_unlock(sc);
		return (EINVAL);
	}
	time_mvtclk =
	    (uint64_t)mvneta_get_clk() * (uint64_t)time_us / (1000ULL * 1000ULL);
	rx->queue_th_time = time_mvtclk;
	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
	mvneta_rx_unlockq(sc, arg->queue);
	mvneta_sc_unlock(sc);

	return (0);
}
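/*
 * Attach-time sysctl layout built below: top-level flow_control and
 * lpi knobs, a "mib" subtree with one read-only node per MIB counter
 * plus a write-1-to-clear "reset" handler, and per-queue "rx" nodes
 * carrying the interrupt-coalescing threshold handler above.
 */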
STATIC void
sysctl_mvneta_init(struct mvneta_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *rxchildren;
	struct sysctl_oid_list *qchildren, *mchildren;
	struct sysctl_oid *tree;
	int i, q;
	struct mvneta_sysctl_queue *rxarg;
#define	MVNETA_SYSCTL_NAME(num) "queue" # num
	static const char *sysctl_queue_names[] = {
		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
		MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
	};
#undef MVNETA_SYSCTL_NAME

#ifndef NO_SYSCTL_DESCR
#define	MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
	static const char *sysctl_queue_descrs[] = {
		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
	};
#undef MVNETA_SYSCTL_DESCR
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
	rxchildren = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
	mchildren = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");

	/* dev.mvneta.[unit].mib.<mibs> */
	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];

		mib_arg->sc = sc;
		mib_arg->index = i;
		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
		    mvneta_mib_list[i].sysctl_name,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    (void *)mib_arg, 0, sysctl_read_mib, "I",
		    mvneta_mib_list[i].desc);
	}
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");

	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rxarg = &sc->sysctl_rx_queue[q];

		rxarg->sc = sc;
		rxarg->queue = q;
		rxarg->rxtx = MVNETA_SYSCTL_RX;

		/* hw.mvneta.mvneta[unit].rx.[queue] */
		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
		    sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
		    sysctl_queue_descrs[q]);
		qchildren = SYSCTL_CHILDREN(tree);

		/* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
		    sysctl_set_queue_rxthtime, "I",
		    "interrupt coalescing threshold timer [us]");
	}
}
/*
 * MIB
 */
STATIC void
mvneta_clear_mib(struct mvneta_softc *sc)
{
	int i;

	KASSERT_SC_MTX(sc);

	for (i = 0; i < nitems(mvneta_mib_list); i++) {
		if (mvneta_mib_list[i].reg64)
			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
		else
			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
		sc->sysctl_mib[i].counter = 0;
	}
	MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc = 0;
	MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc = 0;
	sc->counter_watchdog = 0;
}
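/*
 * mvneta_update_mib() below folds the hardware counters into both the
 * sysctl snapshots and the if_inc_counter() statistics.  The MIB
 * registers appear to clear on read (mvneta_clear_mib() above relies
 * on exactly that), so each delta is accumulated at most once.
 */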
STATIC void
mvneta_update_mib(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int i;
	uint64_t val;
	uint32_t reg;

	for (i = 0; i < nitems(mvneta_mib_list); i++) {

		if (mvneta_mib_list[i].reg64)
			val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
		else
			val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);

		if (val == 0)
			continue;

		sc->sysctl_mib[i].counter += val;
		switch (mvneta_mib_list[i].regnum) {
		case MVNETA_MIB_RX_GOOD_OCT:
			if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
			break;
		case MVNETA_MIB_RX_BAD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
			break;
		case MVNETA_MIB_RX_GOOD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
			break;
		case MVNETA_MIB_RX_MCAST_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
			break;
		case MVNETA_MIB_TX_GOOD_OCT:
			if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
			break;
		case MVNETA_MIB_TX_GOOD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
			break;
		case MVNETA_MIB_TX_MCAST_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
			break;
		case MVNETA_MIB_MAC_COL:
			if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
			break;
		case MVNETA_MIB_TX_MAC_TRNS_ERR:
		case MVNETA_MIB_TX_EXCES_COL:
		case MVNETA_MIB_MAC_LATE_COL:
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
			break;
		}
	}

	reg = MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
	reg = MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);

	/* TX watchdog. */
	if (sc->counter_watchdog_mib > 0) {
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
		sc->counter_watchdog_mib = 0;
	}
	/*
	 * TX driver errors:
	 * We do not take queue locks so as not to disrupt the TX path.
	 * We may miss at most one drv error, which will be picked up on
	 * the next MIB update.  We may also clear the counter while the
	 * TX path is incrementing it, but we only do that when it was
	 * non-zero, so at most one error is lost.
	 */
	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
		tx = MVNETA_TX_RING(sc, i);

		if (tx->drv_error > 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);