/*
 * Copyright (c) 2017 Stormshield.
 * Copyright (c) 2017 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp_lro.h>

#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/mdio/mdio.h>

#include <arm/mv/mvvar.h>

#if !defined(__aarch64__)
#include <arm/mv/mvreg.h>
#include <arm/mv/mvwin.h>
#endif

#include "if_mvnetareg.h"
#include "if_mvnetavar.h"

#include "miibus_if.h"

#define STATIC /* nothing */

#define DASSERT(x) KASSERT((x), (#x))

#define A3700_TCLK_250MHZ	250000000
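
/*
 * Armada 3700 (arm64) uses a fixed 250 MHz core clock; mvneta_get_clk()
 * simply returns this constant when built for __aarch64__.
 */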
/* Device Register Initialization */
STATIC int mvneta_initreg(struct ifnet *);

/* Descriptor Ring Control for each queue */
STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
STATIC int mvneta_dma_create(struct mvneta_softc *);

/* Rx/Tx Queue Control */
STATIC int mvneta_rx_queue_init(struct ifnet *, int);
STATIC int mvneta_tx_queue_init(struct ifnet *, int);
STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);

/* Interrupt Handlers */
STATIC void mvneta_disable_intr(struct mvneta_softc *);
STATIC void mvneta_enable_intr(struct mvneta_softc *);
STATIC void mvneta_rxtxth_intr(void *);
STATIC int mvneta_misc_intr(struct mvneta_softc *);
STATIC void mvneta_tick(void *);

/* struct ifnet and mii callbacks */
STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
#ifdef MVNETA_MULTIQUEUE
STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
#else /* !MVNETA_MULTIQUEUE */
STATIC void mvneta_start(struct ifnet *);
#endif
STATIC void mvneta_qflush(struct ifnet *);
STATIC void mvneta_tx_task(void *, int);
STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
STATIC void mvneta_init(void *);
STATIC void mvneta_init_locked(void *);
STATIC void mvneta_stop(struct mvneta_softc *);
STATIC void mvneta_stop_locked(struct mvneta_softc *);
STATIC int mvneta_mediachange(struct ifnet *);
STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
STATIC void mvneta_portup(struct mvneta_softc *);
STATIC void mvneta_portdown(struct mvneta_softc *);

/* Link State Notify */
STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
STATIC int mvneta_update_media(struct mvneta_softc *, int);
STATIC void mvneta_adjust_link(struct mvneta_softc *);
STATIC void mvneta_update_eee(struct mvneta_softc *);
STATIC void mvneta_update_fc(struct mvneta_softc *);
STATIC void mvneta_link_isr(struct mvneta_softc *);
STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
STATIC void mvneta_linkup(struct mvneta_softc *);
STATIC void mvneta_linkdown(struct mvneta_softc *);
STATIC void mvneta_linkreset(struct mvneta_softc *);

STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
STATIC void mvneta_tx_set_csumflag(struct ifnet *,
    struct mvneta_tx_desc *, struct mbuf *);
STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
STATIC void mvneta_tx_drain(struct mvneta_softc *);

STATIC int mvneta_rx(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
STATIC void mvneta_rx_set_csumflag(struct ifnet *,
    struct mvneta_rx_desc *, struct mbuf *);
STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);

/* MAC address filter */
STATIC void mvneta_filter_setup(struct mvneta_softc *);

STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
STATIC void sysctl_mvneta_init(struct mvneta_softc *);

STATIC void mvneta_clear_mib(struct mvneta_softc *);
STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int);
STATIC void mvneta_update_mib(struct mvneta_softc *);

STATIC boolean_t mvneta_has_switch(device_t);

#define mvneta_sc_lock(sc)	mtx_lock(&sc->mtx)
#define mvneta_sc_unlock(sc)	mtx_unlock(&sc->mtx)

STATIC struct mtx mii_mutex;
STATIC int mii_init = 0;

STATIC int mvneta_detach(device_t);
STATIC int mvneta_miibus_readreg(device_t, int, int);
STATIC int mvneta_miibus_writereg(device_t, int, int, int);
STATIC uint32_t mvneta_get_clk(void);

static device_method_t mvneta_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	mvneta_detach),
	DEVMETHOD(miibus_readreg,	mvneta_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mvneta_miibus_writereg),
	DEVMETHOD(mdio_readreg,		mvneta_miibus_readreg),
	DEVMETHOD(mdio_writereg,	mvneta_miibus_writereg),
	DEVMETHOD_END
};

DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));

DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
MODULE_DEPEND(mvneta, ether, 1, 1, 1);
MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
 * List of MIB registers and names.
 */
enum mvneta_mib_idx {
	MVNETA_MIB_RX_GOOD_OCT_IDX,
	MVNETA_MIB_RX_BAD_OCT_IDX,
	MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
	MVNETA_MIB_RX_GOOD_FRAME_IDX,
	MVNETA_MIB_RX_BAD_FRAME_IDX,
	MVNETA_MIB_RX_BCAST_FRAME_IDX,
	MVNETA_MIB_RX_MCAST_FRAME_IDX,
	MVNETA_MIB_RX_FRAME64_OCT_IDX,
	MVNETA_MIB_RX_FRAME127_OCT_IDX,
	MVNETA_MIB_RX_FRAME255_OCT_IDX,
	MVNETA_MIB_RX_FRAME511_OCT_IDX,
	MVNETA_MIB_RX_FRAME1023_OCT_IDX,
	MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
	MVNETA_MIB_TX_GOOD_OCT_IDX,
	MVNETA_MIB_TX_GOOD_FRAME_IDX,
	MVNETA_MIB_TX_EXCES_COL_IDX,
	MVNETA_MIB_TX_MCAST_FRAME_IDX,
	MVNETA_MIB_TX_BCAST_FRAME_IDX,
	MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
	MVNETA_MIB_FC_SENT_IDX,
	MVNETA_MIB_FC_GOOD_IDX,
	MVNETA_MIB_FC_BAD_IDX,
	MVNETA_MIB_PKT_UNDERSIZE_IDX,
	MVNETA_MIB_PKT_FRAGMENT_IDX,
	MVNETA_MIB_PKT_OVERSIZE_IDX,
	MVNETA_MIB_PKT_JABBER_IDX,
	MVNETA_MIB_MAC_RX_ERR_IDX,
	MVNETA_MIB_MAC_CRC_ERR_IDX,
	MVNETA_MIB_MAC_COL_IDX,
	MVNETA_MIB_MAC_LATE_COL_IDX,
};

STATIC struct mvneta_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
} mvneta_mib_list[] = {
	[MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
	    "rx_good_oct", "Good Octets Rx"},
	[MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
	    "rx_bad_oct", "Bad Octets Rx"},
	[MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
	    "tx_mac_err", "MAC Transmit Error"},
	[MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
	    "rx_good_frame", "Good Frames Rx"},
	[MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
	    "rx_bad_frame", "Bad Frames Rx"},
	[MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
	    "rx_bcast_frame", "Broadcast Frames Rx"},
	[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
	    "rx_mcast_frame", "Multicast Frames Rx"},
	[MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
	    "rx_frame_1_64", "Frame Size 1 - 64"},
	[MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
	    "rx_frame_65_127", "Frame Size 65 - 127"},
	[MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
	    "rx_frame_128_255", "Frame Size 128 - 255"},
	[MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
	    "rx_frame_256_511", "Frame Size 256 - 511"},
	[MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
	    "rx_frame_512_1023", "Frame Size 512 - 1023"},
	[MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
	    "rx_fame_1024_max", "Frame Size 1024 - Max"},
	[MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
	    "tx_good_oct", "Good Octets Tx"},
	[MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
	    "tx_good_frame", "Good Frames Tx"},
	[MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
	    "tx_exces_collision", "Excessive Collision"},
	[MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
	    "tx_mcast_frame", "Multicast Frames Tx"},
	[MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
	    "tx_bcast_frame", "Broadcast Frames Tx"},
	[MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
	    "tx_mac_ctl_err", "Unknown MAC Control"},
	[MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
	    "fc_tx", "Flow Control Tx"},
	[MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
	    "fc_rx_good", "Good Flow Control Rx"},
	[MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
	    "fc_rx_bad", "Bad Flow Control Rx"},
	[MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
	    "pkt_undersize", "Undersized Packets Rx"},
	[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
	    "pkt_fragment", "Fragmented Packets Rx"},
	[MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
	    "pkt_oversize", "Oversized Packets Rx"},
	[MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
	    "pkt_jabber", "Jabber Packets Rx"},
	[MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
	    "mac_rx_err", "MAC Rx Errors"},
	[MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
	    "mac_crc_err", "MAC CRC Errors"},
	[MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
	    "mac_collision", "MAC Collision"},
	[MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
	    "mac_late_collision", "MAC Late Collision"},
};
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	const char *description;
} mvneta_intrs[] = {
	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
};

STATIC uint32_t
mvneta_get_clk(void)
{
#if defined(__aarch64__)
	return (A3700_TCLK_250MHZ);
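
/*
 * The station address is stored split across two registers: MVNETA_MACAH
 * holds the four most-significant bytes of the MAC address, MVNETA_MACAL
 * the two least-significant ones, matching the packing done below.
 */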
STATIC void
mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) |
	    (addr[2] << 8) | (addr[3] << 0);

	MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
	MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
}

STATIC int
mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	if (mvneta_fdt_mac_address(sc, addr) == 0)
		return (0);

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
	if (mac_l == 0 && mac_h == 0) {
		/*
		 * Generate pseudo-random MAC.
		 * Set lower part to random number | unit number.
		 */
		mac_l = arc4random() & ~0xff;
		mac_l |= device_get_unit(sc->dev) & 0xff;
		mac_h = arc4random();
		mac_h &= ~(3 << 24);	/* Clear multicast and LAA bits */

		device_printf(sc->dev,
		    "Could not acquire MAC address. "
		    "Using randomized one.\n");
	}

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
	return (0);
}

STATIC boolean_t
mvneta_has_switch(device_t self)
{
	return (mvneta_has_switch_fdt(self));
}
STATIC int
mvneta_dma_create(struct mvneta_softc *sc)
{
	size_t maxsize, maxsegsz;
	size_t q;
	int error;

	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    maxsize,			/* maxsize */
	    1,				/* nsegments */
	    maxsegsz,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->tx_dtag);		/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx descriptors.\n");
		goto fail;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME,		/* maxsize */
	    MVNETA_TX_SEGLIMIT,		/* nsegments */
	    MVNETA_MAX_FRAME,		/* maxsegsz */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->txmbuf_dtag);		/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx mbufs.\n");
		goto fail;
	}

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_alloc_tx_queue(sc, q);
		if (error != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
			goto fail;
		}
	}
	/* Create tag for Rx descriptors */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    32, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
	    1,				/* nsegments */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->rx_dtag);		/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx descriptors.\n");
		goto fail;
	}

	/* Create tag for Rx buffers */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    32, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME, 1,	/* maxsize, nsegments */
	    MVNETA_MAX_FRAME,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->rxbuf_dtag);		/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx buffers.\n");
		goto fail;
	}

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
			goto fail;
		}
	}

	return (0);

fail:
	mvneta_detach(sc->dev);

	return (error);
}
STATIC int
mvneta_attach(device_t self)
{
	struct mvneta_softc *sc;
	struct ifnet *ifp;
	device_t child;
	int ifm_target;
	int q, error;
#if !defined(__aarch64__)
	uint32_t reg;
#endif

	sc = device_get_softc(self);
	sc->dev = self;

	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);

	error = bus_alloc_resources(self, res_spec, sc->res);
	if (error) {
		device_printf(self, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->version = MVNETA_READ(sc, MVNETA_PV);
	device_printf(self, "version is %x\n", sc->version);
	callout_init(&sc->tick_ch, 0);
	/*
	 * make sure DMA engines are in reset state
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);

#if !defined(__aarch64__)
	/*
	 * Disable port snoop for buffers and descriptors
	 * to avoid L2 caching of both without DRAM copy.
	 * Obtain coherency settings from the first MBUS
	 * window attribute.
	 */
	if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
		reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
		reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
		reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
		MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
	}
#endif

	error = bus_setup_intr(self, sc->res[1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
	    &sc->ih_cookie[0]);
	if (error) {
		device_printf(self, "could not setup %s\n",
		    mvneta_intrs[0].description);
		mvneta_detach(self);
		return (ENXIO);
	}

	/*
	 * MAC address
	 */
	if (mvneta_get_mac_address(sc, sc->enaddr)) {
		device_printf(self, "no mac address.\n");
		return (ENXIO);
	}
	mvneta_set_mac_address(sc, sc->enaddr);

	mvneta_disable_intr(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(self, "if_alloc() failed\n");
		mvneta_detach(self);
		return (ENOMEM);
	}
	if_initname(ifp, device_get_name(self), device_get_unit(self));
	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef MVNETA_MULTIQUEUE
	ifp->if_transmit = mvneta_transmit;
	ifp->if_qflush = mvneta_qflush;
#else /* !MVNETA_MULTIQUEUE */
	ifp->if_start = mvneta_start;
	ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
#endif
	ifp->if_init = mvneta_init;
	ifp->if_ioctl = mvneta_ioctl;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM;
	/*
	 * The hardware cannot insert VLAN tags on transmit, yet tagging is
	 * required for VLAN hardware checksums, so the driver inserts the
	 * tag in software.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;

	/*
	 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
	 */
	ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disabled option(s):
	 * - Support for Large Receive Offload
	 */
	ifp->if_capabilities |= IFCAP_LRO;

	ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;

	sc->rx_frame_size = MCLBYTES;	/* ether_ifattach() always sets normal mtu */

	/*
	 * Device DMA Buffer allocation.
	 * Handles resource deallocation in case of failure.
	 */
	error = mvneta_dma_create(sc);
	if (error != 0) {
		mvneta_detach(self);
		return (error);
	}

	/* Initialize queues */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_init_tx_queue(sc, q);
		if (error != 0) {
			mvneta_detach(self);
			return (error);
		}
	}

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		error = mvneta_ring_init_rx_queue(sc, q);
		if (error != 0) {
			mvneta_detach(self);
			return (error);
		}
	}

	/*
	 * Enable DMA engines and Initialize Device Registers.
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
	mvneta_sc_lock(sc);
	mvneta_filter_setup(sc);
	mvneta_sc_unlock(sc);
	/*
	 * Now MAC is working, setup MII.
	 */
	if (mii_init == 0) {
		/*
		 * The MII bus is shared by all MACs and all PHYs in the SoC;
		 * serializing bus access keeps it safe.
		 */
		mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
		mii_init = 1;
	}
	if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
		error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
		    mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
		    MII_OFFSET_ANY, 0);
		if (error != 0) {
			device_printf(self,
			    "MII attach failed, error: %d\n", error);
			ether_ifdetach(sc->ifp);
			mvneta_detach(self);
			return (error);
		}
		sc->mii = device_get_softc(sc->miibus);
		sc->phy_attached = 1;

		/* Disable auto-negotiation in MAC - rely on PHY layer */
		mvneta_update_autoneg(sc, FALSE);
	} else if (sc->use_inband_status == TRUE) {
		/* In-band link status */
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
		    mvneta_mediastatus);

		/* Configure media */
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);

		/* Enable auto-negotiation */
		mvneta_update_autoneg(sc, TRUE);

		mvneta_sc_lock(sc);
		if (MVNETA_IS_LINKUP(sc))
			mvneta_linkup(sc);
		else
			mvneta_linkdown(sc);
		mvneta_sc_unlock(sc);
	} else {
		/* Fixed-link, use predefined values */
		mvneta_update_autoneg(sc, FALSE);
		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
		    mvneta_mediastatus);

		ifm_target = IFM_ETHER;
		switch (sc->phy_speed) {
		case 2500:
			if (sc->phy_mode != MVNETA_PHY_SGMII &&
			    sc->phy_mode != MVNETA_PHY_QSGMII) {
				device_printf(self,
				    "2.5G speed can work only in (Q)SGMII mode\n");
				ether_ifdetach(sc->ifp);
				mvneta_detach(self);
				return (ENXIO);
			}
			ifm_target |= IFM_2500_T;
			break;
		case 1000:
			ifm_target |= IFM_1000_T;
			break;
		case 100:
			ifm_target |= IFM_100_TX;
			break;
		case 10:
			ifm_target |= IFM_10_T;
			break;
		default:
			ether_ifdetach(sc->ifp);
			mvneta_detach(self);
			return (ENXIO);
		}

		if (sc->phy_fdx)
			ifm_target |= IFM_FDX;
		else
			ifm_target |= IFM_HDX;

		ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
		ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
		if_link_state_change(sc->ifp, LINK_STATE_UP);

		if (mvneta_has_switch(self)) {
			device_printf(self, "This device is attached to a switch\n");
			child = device_add_child(sc->dev, "mdio", -1);
			if (child == NULL) {
				ether_ifdetach(sc->ifp);
				mvneta_detach(self);
				return (ENXIO);
			}
			bus_generic_attach(sc->dev);
			bus_generic_attach(child);
		}

		/* Configure MAC media */
		mvneta_update_media(sc, ifm_target);
	}

	ether_ifattach(ifp, sc->enaddr);

	callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);

	sysctl_mvneta_init(sc);

	return (0);
}

STATIC int
mvneta_detach(device_t dev)
{
	struct mvneta_softc *sc;
	int q;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		mvneta_stop(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(sc->ifp);
	}

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
		mvneta_ring_dealloc_rx_queue(sc, q);
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
		mvneta_ring_dealloc_tx_queue(sc, q);

	device_delete_children(dev);

	if (sc->ih_cookie[0] != NULL)
		bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);

	if (sc->tx_dtag != NULL)
		bus_dma_tag_destroy(sc->tx_dtag);
	if (sc->rx_dtag != NULL)
		bus_dma_tag_destroy(sc->rx_dtag);
	if (sc->txmbuf_dtag != NULL)
		bus_dma_tag_destroy(sc->txmbuf_dtag);
	if (sc->rxbuf_dtag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_dtag);

	bus_release_resources(dev, res_spec, sc->res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);

	return (0);
}
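
/*
 * PHY register access goes through the SMI register: wait for the BUSY
 * bit to clear, post a read or write opcode together with the PHY and
 * register addresses, then (for reads) poll READVALID before taking the
 * data field. Both MII accessors below follow this handshake under the
 * shared mii_mutex.
 */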
STATIC int
mvneta_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvneta_softc *sc;
	struct ifnet *ifp;
	uint32_t smi, val;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
		DELAY(1);
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
		return (-1);
	}

	smi = MVNETA_SMI_PHYAD(phy) |
	    MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
		DELAY(1);
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
		return (-1);
	}

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		smi = MVNETA_READ(sc, MVNETA_SMI);
		if (smi & MVNETA_SMI_READVALID)
			break;
		DELAY(1);
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
		return (-1);
	}

	mtx_unlock(&mii_mutex);

	CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,
	    MVNETA_PHY_TIMEOUT);

	val = smi & MVNETA_SMI_DATA_MASK;

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
	    reg, val);
	return (val);
}
STATIC int
mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvneta_softc *sc;
	struct ifnet *ifp;
	uint32_t smi;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,
	    phy, reg, val);

	mtx_lock(&mii_mutex);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
		DELAY(1);
	}
	if (i == MVNETA_PHY_TIMEOUT) {
		if_printf(ifp, "SMI busy timeout\n");
		mtx_unlock(&mii_mutex);
		return (0);
	}

	smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
	    MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
	MVNETA_WRITE(sc, MVNETA_SMI, smi);

	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
			break;
		DELAY(1);
	}

	mtx_unlock(&mii_mutex);

	if (i == MVNETA_PHY_TIMEOUT)
		if_printf(ifp, "phy write timed out\n");

	return (0);
}

STATIC void
mvneta_portup(struct mvneta_softc *sc)
{
	int q;

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_enable(sc->ifp, q);
		mvneta_rx_unlockq(sc, q);
	}

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		mvneta_tx_queue_enable(sc->ifp, q);
		mvneta_tx_unlockq(sc, q);
	}
}

STATIC void
mvneta_portdown(struct mvneta_softc *sc)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_tx_ring *tx;
	uint32_t reg;
	int q, cnt;

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		mvneta_rx_lockq(sc, q);
		rx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_rx_unlockq(sc, q);
	}

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		tx->queue_status = MVNETA_QUEUE_DISABLED;
		mvneta_tx_unlockq(sc, q);
	}

	/* Wait for all Rx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg = MVNETA_RQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			if_printf(sc->ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while ((reg & MVNETA_RQC_EN_MASK) != 0);

	/* Wait for all Tx activity to terminate. */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);

	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
	reg = MVNETA_TQC_DIS(reg);
	MVNETA_WRITE(sc, MVNETA_TQC, reg);
	cnt = 0;
	do {
		if (cnt >= TX_DISABLE_TIMEOUT) {
			if_printf(sc->ifp,
			    "timeout for TX stopped. tqc 0x%x\n", reg);
			break;
		}
		cnt++;
		reg = MVNETA_READ(sc, MVNETA_TQC);
	} while ((reg & MVNETA_TQC_EN_MASK) != 0);
	/* Wait until the Tx FIFO is empty. */
	cnt = 0;
	do {
		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
			if_printf(sc->ifp,
			    "timeout for TX FIFO drained. ps0 0x%x\n", reg);
			break;
		}
		cnt++;
		reg = MVNETA_READ(sc, MVNETA_PS0);
	} while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
	    ((reg & MVNETA_PS0_TXINPROG) != 0));
}
/*
 * Device Register Initialization:
 * reset device registers to the driver's default values; the device
 * itself is not enabled here.
 */
STATIC int
mvneta_initreg(struct ifnet *ifp)
{
	struct mvneta_softc *sc;
	int q;
	uint32_t reg;

	sc = ifp->if_softc;
	CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);

	/* Disable Legacy WRR, Disable EJP, Release from reset. */
	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
	/* Enable mbus retry. */
	MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);

	/* Init TX/RX Queue Registers */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		if (mvneta_rx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_rx_unlockq(sc, q);
			return (ENOBUFS);
		}
		mvneta_rx_unlockq(sc, q);
	}
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		if (mvneta_tx_queue_init(ifp, q) != 0) {
			device_printf(sc->dev,
			    "initialization failed: cannot initialize queue\n");
			mvneta_tx_unlockq(sc, q);
			return (ENOBUFS);
		}
		mvneta_tx_unlockq(sc, q);
	}

	/*
	 * Ethernet Unit Control - disable automatic PHY management by HW.
	 * In case the port uses SMI-controlled PHY, poll its status with
	 * mii_tick() and update MAC settings accordingly.
	 */
	reg = MVNETA_READ(sc, MVNETA_EUC);
	reg &= ~MVNETA_EUC_POLLING;
	MVNETA_WRITE(sc, MVNETA_EUC, reg);

	/* EEE: Low Power Idle */
	reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
	reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
	MVNETA_WRITE(sc, MVNETA_LPIC0, reg);

	reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);

	reg = MVNETA_LPIC2_MUSTSET;
	MVNETA_WRITE(sc, MVNETA_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVNETA_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVNETA_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(ifp->if_mtu + MVNETA_ETHER_SIZE);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Port MAC Control set 2 */
	reg = MVNETA_READ(sc, MVNETA_PMACC2);
	switch (sc->phy_mode) {
	case MVNETA_PHY_QSGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
		break;
	case MVNETA_PHY_SGMII:
		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
		break;
	case MVNETA_PHY_RGMII:
	case MVNETA_PHY_RGMII_ID:
		reg |= MVNETA_PMACC2_RGMIIEN;
		break;
	}
	reg |= MVNETA_PMACC2_MUSTSET;
	reg &= ~MVNETA_PMACC2_PORTMACRESET;
	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVNETA_READ(sc, MVNETA_PXCX);
	reg &= ~MVNETA_PXCX_TXCRCDIS;
	MVNETA_WRITE(sc, MVNETA_PXCX, reg);
	/* Clear MIB counter registers (clear on read). */
	mvneta_sc_lock(sc);
	mvneta_clear_mib(sc);
	mvneta_sc_unlock(sc);

	/* Set SDC register except IPGINT bits */
	reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVNETA_SDC_BLMR;
	reg |= MVNETA_SDC_BLMT;
	MVNETA_WRITE(sc, MVNETA_SDC, reg);

	return (0);
}
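
/*
 * bus_dmamap_load() callback for the descriptor rings: each ring is a
 * single contiguous DMA segment, so the callback only has to record the
 * bus address of that one segment in the caller-provided bus_addr_t.
 */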
STATIC void
mvneta_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

STATIC int
mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;
	bus_dmamap_t dmap;
	int i, error;

	if (q >= MVNETA_RX_QNUM_MAX)
		return (EINVAL);

	rx = MVNETA_RX_RING(sc, q);
	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
	/* Allocate DMA memory for Rx descriptors */
	error = bus_dmamem_alloc(sc->rx_dtag,
	    (void**)&(rx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &rx->desc_map);
	if (error != 0 || rx->desc == NULL)
		goto fail;
	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
	    rx->desc,
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "Failed to create DMA map for Rx buffer num: %d\n", i);
			goto fail;
		}
		rxbuf = &rx->rxbuf[i];
		rxbuf->dmap = dmap;
		rxbuf->m = NULL;
	}

	return (0);
fail:
	mvneta_rx_lockq(sc, q);
	mvneta_ring_flush_rx_queue(sc, q);
	mvneta_rx_unlockq(sc, q);
	mvneta_ring_dealloc_rx_queue(sc, q);
	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
	return (ENOMEM);
}
STATIC int
mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	int error;

	if (q >= MVNETA_TX_QNUM_MAX)
		return (EINVAL);

	tx = MVNETA_TX_RING(sc, q);
	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
	error = bus_dmamem_alloc(sc->tx_dtag,
	    (void**)&(tx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &tx->desc_map);
	if (error != 0 || tx->desc == NULL)
		goto fail;
	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
	    tx->desc,
	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

#ifdef MVNETA_MULTIQUEUE
	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
	    &tx->ring_mtx);
	if (tx->br == NULL) {
		device_printf(sc->dev,
		    "Could not setup buffer ring for TxQ(%d)\n", q);
		goto fail;
	}
#endif

	return (0);
fail:
	mvneta_tx_lockq(sc, q);
	mvneta_ring_flush_tx_queue(sc, q);
	mvneta_tx_unlockq(sc, q);
	mvneta_ring_dealloc_tx_queue(sc, q);
	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
	return (ENOMEM);
}
STATIC void
mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	void *kva;
	int error;
	int i;

	if (q >= MVNETA_TX_QNUM_MAX)
		return;

	tx = MVNETA_TX_RING(sc, q);

	if (tx->taskq != NULL) {
		/* Remove the task */
		while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
			taskqueue_drain(tx->taskq, &tx->task);
	}
#ifdef MVNETA_MULTIQUEUE
	if (tx->br != NULL)
		drbr_free(tx->br, M_DEVBUF);
#endif

	if (sc->txmbuf_dtag != NULL) {
		for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
			txbuf = &tx->txbuf[i];
			if (txbuf->dmap != NULL) {
				error = bus_dmamap_destroy(sc->txmbuf_dtag,
				    txbuf->dmap);
				if (error != 0)
					panic("%s: map busy for Tx descriptor (Q%d, %d)",
					    __func__, q, i);
			}
		}
	}

	if (tx->desc_pa != 0)
		bus_dmamap_unload(sc->tx_dtag, tx->desc_map);

	kva = (void *)tx->desc;
	if (kva != NULL)
		bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);

	if (mtx_name(&tx->ring_mtx) != NULL)
		mtx_destroy(&tx->ring_mtx);

	memset(tx, 0, sizeof(*tx));
}

STATIC void
mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;
	void *kva;

	if (q >= MVNETA_RX_QNUM_MAX)
		return;

	rx = MVNETA_RX_RING(sc, q);

	if (rx->desc_pa != 0)
		bus_dmamap_unload(sc->rx_dtag, rx->desc_map);

	kva = (void *)rx->desc;
	if (kva != NULL)
		bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);

	lro = &rx->lro;
	tcp_lro_free(lro);

	if (mtx_name(&rx->ring_mtx) != NULL)
		mtx_destroy(&rx->ring_mtx);

	memset(rx, 0, sizeof(*rx));
}
STATIC int
mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct lro_ctrl *lro;
	int error;

	if (q >= MVNETA_RX_QNUM_MAX)
		return (0);

	rx = MVNETA_RX_RING(sc, q);
	rx->dma = rx->cpu = 0;
	rx->queue_th_received = MVNETA_RXTH_COUNT;
	rx->queue_th_time = (mvneta_get_clk() / 1000) / 10;	/* 0.1 [ms] */

	/* Initialize LRO */
	rx->lro_enabled = FALSE;
	if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
		lro = &rx->lro;
		error = tcp_lro_init(lro);
		if (error != 0)
			device_printf(sc->dev, "LRO Initialization failed!\n");
		else {
			rx->lro_enabled = TRUE;
			lro->ifp = sc->ifp;
		}
	}

	return (0);
}

STATIC int
mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	int i, error;

	if (q >= MVNETA_TX_QNUM_MAX)
		return (0);

	tx = MVNETA_TX_RING(sc, q);

	/* Tx handle */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];
		txbuf->m = NULL;
		/* Tx handle needs DMA map for busdma_load_mbuf() */
		error = bus_dmamap_create(sc->txmbuf_dtag, 0,
		    &txbuf->dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "can't create dma map (tx ring %d)\n", i);
			return (ENOBUFS);
		}
	}
	tx->dma = tx->cpu = 0;
	tx->used = 0;
	tx->queue_status = MVNETA_QUEUE_DISABLED;
	tx->queue_hung = FALSE;

	TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
	tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &tx->taskq);
	taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
	    device_get_nameunit(sc->dev), q);

	return (0);
}
STATIC void
mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	int i;

	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Tx handle */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txbuf = &tx->txbuf[i];
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		if (txbuf->m != NULL) {
			m_freem(txbuf->m);
			txbuf->m = NULL;
		}
	}
	tx->dma = tx->cpu = 0;
	tx->used = 0;
}

STATIC void
mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_buf *rxbuf;
	int i;

	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	/* Rx handle */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxbuf = &rx->rxbuf[i];
		mvneta_rx_buf_free(sc, rxbuf);
	}
	rx->dma = rx->cpu = 0;
}
/*
 * Rx/Tx Queue Control
 */
STATIC int
mvneta_rx_queue_init(struct ifnet *ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;
	uint32_t reg;

	sc = ifp->if_softc;
	KASSERT_RX_MTX(sc, q);
	rx = MVNETA_RX_RING(sc, q);
	DASSERT(rx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);

	/* Rx buffer size and descriptor ring size */
	reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));
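
	/*
	 * Note: the BUFFERSIZE field above and the PACKETOFFSET field below
	 * are programmed in 8-byte units, hence the ">> 3" applied to
	 * sc->rx_frame_size and MVNETA_PACKET_OFFSET.
	 */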
	/* Rx packet offset address */
	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
	    MVNETA_READ(sc, MVNETA_PRXC(q)));

	/* if DMA is not working, register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
	return (0);
}

STATIC int
mvneta_tx_queue_init(struct ifnet *ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	uint32_t reg;

	sc = ifp->if_softc;
	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);

	/* descriptor ring size */
	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);

	/* if DMA is not working, register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
	return (0);
}
STATIC int
mvneta_rx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;
	uint32_t reg;

	sc = ifp->if_softc;
	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);

	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_RBICTAPQ(q);	/* Rx Buffer Interrupt Coalesce */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg |= MVNETA_RQC_ENQ(q);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);

	rx->queue_status = MVNETA_QUEUE_WORKING;
	return (0);
}

STATIC int
mvneta_tx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	sc = ifp->if_softc;
	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Enable Tx queue */
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));

	tx->queue_status = MVNETA_QUEUE_IDLE;
	tx->queue_hung = FALSE;
	return (0);
}
STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_lock(&sc->rx_ring[q].ring_mtx);
}

STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_unlock(&sc->rx_ring[q].ring_mtx);
}

STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
}

STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_lock(&sc->tx_ring[q].ring_mtx);
}

STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_unlock(&sc->tx_ring[q].ring_mtx);
}
/*
 * Interrupt Handlers
 */
STATIC void
mvneta_disable_intr(struct mvneta_softc *sc)
{

	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
	MVNETA_WRITE(sc, MVNETA_PIE, 0);
}

STATIC void
mvneta_enable_intr(struct mvneta_softc *sc)
{
	uint32_t reg;

	/* Enable Summary Bit to check all interrupt cause. */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	if (sc->use_inband_status) {
		/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
		MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
	}

	/* Enable All Queue Interrupt */
	reg = MVNETA_READ(sc, MVNETA_PIE);
	reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
	MVNETA_WRITE(sc, MVNETA_PIE, reg);
}
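
/*
 * All port events arrive through one aggregated interrupt (RXTX_TH).
 * The handler below acknowledges the cause register, forwards link/misc
 * events to mvneta_misc_intr() when in-band status is used, and then
 * services the Rx queues flagged in the cause bits.
 */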
STATIC void
mvneta_rxtxth_intr(void *arg)
{
	struct mvneta_softc *sc;
	struct ifnet *ifp;
	uint32_t ic, queues;

	sc = arg;
	ifp = sc->ifp;
	CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
	if (ic == 0)
		return;
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);

	/* Ack maintenance interrupt first */
	if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
	    sc->use_inband_status)) {
		mvneta_sc_lock(sc);
		mvneta_misc_intr(sc);
		mvneta_sc_unlock(sc);
	}
	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
		return;
	/* RxTxTH interrupt */
	queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
	if (__predict_true(queues)) {
		CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
		/* At the moment the driver supports only one Rx queue. */
		DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
		mvneta_rx(sc, 0, 0);
	}
}
STATIC int
mvneta_misc_intr(struct mvneta_softc *sc)
{
	uint32_t ic;
	int claimed = 0;

	CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);
	KASSERT_SC_MTX(sc);

	for (;;) {
		ic = MVNETA_READ(sc, MVNETA_PMIC);
		ic &= MVNETA_READ(sc, MVNETA_PMIM);
		if (ic == 0)
			break;
		MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
		claimed = 1;

		if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
			mvneta_link_isr(sc);
	}
	return (claimed);
}
STATIC void
mvneta_tick(void *arg)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct mvneta_rx_ring *rx;
	int q;
	uint32_t fc_prev, fc_curr;

	sc = arg;

	/*
	 * This is done before mib update to get the right stats
	 * for the watchdog.
	 */
	mvneta_tx_drain(sc);

	/* Extract previous flow-control frame received counter. */
	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
	/* Read mib registers (clear by read). */
	mvneta_update_mib(sc);
	/* Extract current flow-control frame received counter. */
	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;

	if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
		mvneta_sc_lock(sc);
		mii_tick(sc->mii);

		/* Adjust MAC settings */
		mvneta_adjust_link(sc);
		mvneta_sc_unlock(sc);
	}
	/*
	 * If we were unable to refill the Rx queue when leaving the Rx
	 * handler, the ring was left without mbufs and with no way to call
	 * the refill function; take care of it here.
	 */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		if (rx->needs_refill == TRUE) {
			mvneta_rx_lockq(sc, q);
			mvneta_rx_queue_refill(sc, q);
			mvneta_rx_unlockq(sc, q);
		}
	}

	/*
	 * Watchdog:
	 * - check if the queue is marked as hung.
	 * - ignore hung status if we received some pause frames,
	 *   as the hardware may have paused packet transmission.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		/*
		 * We should take the queue lock, but as we only read the
		 * queue status we can do without it; at worst we misdetect
		 * the queue status for one tick.
		 */
		tx = MVNETA_TX_RING(sc, q);

		if (tx->queue_hung && (fc_curr - fc_prev) == 0)
			goto timeout;
	}

	callout_schedule(&sc->tick_ch, hz);
	return;
timeout:
	if_printf(sc->ifp, "watchdog timeout\n");

	mvneta_sc_lock(sc);
	sc->counter_watchdog++;
	sc->counter_watchdog_mib++;
	/* Trigger reinitialize sequence. */
	mvneta_stop_locked(sc);
	mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);
}
STATIC void
mvneta_qflush(struct ifnet *ifp)
{
#ifdef MVNETA_MULTIQUEUE
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct mbuf *m;
	int q;

	sc = ifp->if_softc;

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		mvneta_tx_lockq(sc, q);
		while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
			m_freem(m);
		mvneta_tx_unlockq(sc, q);
	}
#endif
	if_qflush(ifp);
}
STATIC void
mvneta_tx_task(void *arg, int pending)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct ifnet *ifp;
	int error;

	tx = arg;
	ifp = tx->ifp;
	sc = ifp->if_softc;

	mvneta_tx_lockq(sc, tx->qidx);
	error = mvneta_xmit_locked(sc, tx->qidx);
	mvneta_tx_unlockq(sc, tx->qidx);
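
	/*
	 * On a transient error (anything but ENETDOWN) back off for one
	 * tick and reschedule the task instead of dropping the frames.
	 */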
	if (__predict_false(error != 0 && error != ENETDOWN)) {
		pause("mvneta_tx_task_sleep", 1);
		taskqueue_enqueue(tx->taskq, &tx->task);
	}
}

STATIC int
mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
{
	struct mvneta_tx_ring *tx;
	struct ifnet *ifp;
	int error;

	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);
	error = 0;

	ifp = sc->ifp;

	/* Don't enqueue packet if the queue is disabled. */
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
		m_freem(*m);
		*m = NULL;
		return (ENETDOWN);
	}

	/* Reclaim mbuf if above threshold. */
	if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
		mvneta_tx_queue_complete(sc, q);

	/* Do not call transmit path if queue is already too full. */
	if (__predict_false(tx->used >
	    MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
		return (ENOBUFS);

	error = mvneta_tx_queue(sc, m, q);
	if (__predict_false(error != 0))
		return (error);

	/* Send a copy of the frame to the BPF listener */
	ETHER_BPF_MTAP(ifp, *m);

	/* Set watchdog on */
	tx->watchdog_time = ticks;
	tx->queue_status = MVNETA_QUEUE_WORKING;

	return (error);
}
#ifdef MVNETA_MULTIQUEUE
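/*
 * Multiqueue transmit: frames with a flow ID are hashed onto a Tx queue;
 * when the queue's buf_ring is empty and its lock is free, the frame is
 * sent directly on the fast path, otherwise it is enqueued and drained
 * later by the per-queue transmit task.
 */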
STATIC int
mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	int error;
	int q;

	sc = ifp->if_softc;

	/* Use the default queue if there is no flow ID, as the thread can migrate. */
	if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
		q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
	else
		q = 0;

	tx = MVNETA_TX_RING(sc, q);

	/* If the buf_ring is full, start transmitting immediately. */
	if (buf_ring_full(tx->br)) {
		mvneta_tx_lockq(sc, q);
		mvneta_xmit_locked(sc, q);
		mvneta_tx_unlockq(sc, q);
	}

	/*
	 * If the buf_ring is empty we will not reorder packets.
	 * If the lock is available transmit without using buf_ring.
	 */
	if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
		error = mvneta_xmitfast_locked(sc, q, &m);
		mvneta_tx_unlockq(sc, q);
		if (__predict_true(error == 0))
			return (0);

		/* Transmit can fail in fastpath. */
		if (__predict_false(m == NULL))
			return (error);
	}

	/* Enqueue then schedule taskqueue. */
	error = drbr_enqueue(ifp, tx->br, m);
	if (__predict_false(error != 0))
		return (error);

	taskqueue_enqueue(tx->taskq, &tx->task);
	return (0);
}

STATIC int
mvneta_xmit_locked(struct mvneta_softc *sc, int q)
{
	struct ifnet *ifp;
	struct mvneta_tx_ring *tx;
	struct mbuf *m;
	int error;

	KASSERT_TX_MTX(sc, q);
	ifp = sc->ifp;
	tx = MVNETA_TX_RING(sc, q);
	error = 0;

	while ((m = drbr_peek(ifp, tx->br)) != NULL) {
		error = mvneta_xmitfast_locked(sc, q, &m);
		if (__predict_false(error != 0)) {
			if (m != NULL)
				drbr_putback(ifp, tx->br, m);
			else
				drbr_advance(ifp, tx->br);
			break;
		}
		drbr_advance(ifp, tx->br);
	}

	return (error);
}
#else /* !MVNETA_MULTIQUEUE */
STATIC void
mvneta_start(struct ifnet *ifp)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	int error;

	sc = ifp->if_softc;
	tx = MVNETA_TX_RING(sc, 0);

	mvneta_tx_lockq(sc, 0);
	error = mvneta_xmit_locked(sc, 0);
	mvneta_tx_unlockq(sc, 0);
	/* Handle retransmit in the background taskq. */
	if (__predict_false(error != 0 && error != ENETDOWN))
		taskqueue_enqueue(tx->taskq, &tx->task);
}

STATIC int
mvneta_xmit_locked(struct mvneta_softc *sc, int q)
{
	struct ifnet *ifp;
	struct mvneta_tx_ring *tx;
	struct mbuf *m;
	int error;

	KASSERT_TX_MTX(sc, q);
	ifp = sc->ifp;
	tx = MVNETA_TX_RING(sc, 0);
	error = 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = mvneta_xmitfast_locked(sc, q, &m);
		if (__predict_false(error != 0)) {
			if (m != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
	}

	return (error);
}
#endif
STATIC int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;
	struct ifreq *ifr;
	int error, mask;
	uint32_t flags;
	int q;

	error = 0;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
		mvneta_sc_lock(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mvneta_if_flags;

				sc->mvneta_if_flags = ifp->if_flags;

				if ((flags & IFF_PROMISC) != 0)
					mvneta_filter_setup(sc);
			} else {
				mvneta_init_locked(sc);
				sc->mvneta_if_flags = ifp->if_flags;
				if (sc->phy_attached)
					mii_mediachg(sc->mii);
				mvneta_sc_unlock(sc);
				break;
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mvneta_stop_locked(sc);

		sc->mvneta_if_flags = ifp->if_flags;
		mvneta_sc_unlock(sc);
		break;
	case SIOCSIFCAP:
		if (ifp->if_mtu > sc->tx_csum_limit &&
		    ifr->ifr_reqcap & IFCAP_TXCSUM)
			ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = CSUM_IP | CSUM_TCP |
				    CSUM_UDP;
			else
				ifp->if_hwassist = 0;
		}
		if (mask & IFCAP_LRO) {
			mvneta_sc_lock(sc);
			ifp->if_capenable ^= IFCAP_LRO;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
					rx = MVNETA_RX_RING(sc, q);
					rx->lro_enabled = !rx->lro_enabled;
				}
			}
			mvneta_sc_unlock(sc);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFMEDIA:
		if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
		    (ifr->ifr_media & IFM_FDX) == 0) {
			device_printf(sc->dev,
			    "%s half-duplex unsupported\n",
			    IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
			    "1000baseT" :
			    "2500baseT");
			error = EINVAL;
			break;
		}
	case SIOCGIFMEDIA: /* FALLTHROUGH */
	case SIOCGIFXMEDIA:
		if (!sc->phy_attached)
			error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
			    cmd);
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
			    cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
		    MVNETA_ETHER_SIZE) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			mvneta_sc_lock(sc);
			if (ifp->if_mtu + MVNETA_ETHER_SIZE <= MCLBYTES) {
				sc->rx_frame_size = MCLBYTES;
			} else {
				sc->rx_frame_size = MJUM9BYTES;
			}
			if (ifp->if_mtu > sc->tx_csum_limit) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist = 0;
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist = CSUM_IP | CSUM_TCP |
				    CSUM_UDP;
			}
			/*
			 * Reinitialize RX queues.
			 * We need to update RX descriptor size.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				mvneta_stop_locked(sc);

			for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
				mvneta_rx_lockq(sc, q);
				if (mvneta_rx_queue_init(ifp, q) != 0) {
					device_printf(sc->dev,
					    "initialization failed:"
					    " cannot initialize queue\n");
					mvneta_rx_unlockq(sc, q);
					error = ENOBUFS;
					break;
				}
				mvneta_rx_unlockq(sc, q);
			}

			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				mvneta_init_locked(sc);

			mvneta_sc_unlock(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}
STATIC void
mvneta_init_locked(void *arg)
{
	struct mvneta_softc *sc;
	struct ifnet *ifp;
	uint32_t reg;
	int q, cpu;

	sc = arg;
	ifp = sc->ifp;

	if (!device_is_attached(sc->dev) ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	mvneta_disable_intr(sc);
	callout_stop(&sc->tick_ch);

	/* Get the latest mac address */
	bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
	mvneta_set_mac_address(sc, sc->enaddr);
	mvneta_filter_setup(sc);

	/* Start DMA Engine */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);

	/* Enable port */
	reg = MVNETA_READ(sc, MVNETA_PMACC0);
	reg |= MVNETA_PMACC0_PORTEN;
	reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(ifp->if_mtu + MVNETA_ETHER_SIZE);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Allow access to each TXQ/RXQ from both CPUs */
	for (cpu = 0; cpu < mp_ncpus; ++cpu)
		MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
		    MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_refill(sc, q);
		mvneta_rx_unlockq(sc, q);
	}

	if (!sc->phy_attached)
		mvneta_linkup(sc);

	/* Enable interrupt */
	mvneta_enable_intr(sc);

	callout_schedule(&sc->tick_ch, hz);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

STATIC void
mvneta_init(void *arg)
{
	struct mvneta_softc *sc;

	sc = arg;
	mvneta_sc_lock(sc);
	mvneta_init_locked(sc);
	if (sc->phy_attached)
		mii_mediachg(sc->mii);
	mvneta_sc_unlock(sc);
}
STATIC void
mvneta_stop_locked(struct mvneta_softc *sc)
{
	struct ifnet *ifp;
	struct mvneta_rx_ring *rx;
	struct mvneta_tx_ring *tx;
	uint32_t reg;
	int q;

	ifp = sc->ifp;
	if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	mvneta_disable_intr(sc);

	callout_stop(&sc->tick_ch);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* Link down */
	if (sc->linkup == TRUE)
		mvneta_linkdown(sc);

	/* Reset the MAC Port Enable bit */
	reg = MVNETA_READ(sc, MVNETA_PMACC0);
	reg &= ~MVNETA_PMACC0_PORTEN;
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Disable each queue */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);

		mvneta_rx_lockq(sc, q);
		mvneta_ring_flush_rx_queue(sc, q);
		mvneta_rx_unlockq(sc, q);
	}

	/*
	 * Hold Reset state of DMA Engine
	 * (must write 0x0 to restart it)
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);

	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);

		mvneta_tx_lockq(sc, q);
		mvneta_ring_flush_tx_queue(sc, q);
		mvneta_tx_unlockq(sc, q);
	}
}

STATIC void
mvneta_stop(struct mvneta_softc *sc)
{

	mvneta_sc_lock(sc);
	mvneta_stop_locked(sc);
	mvneta_sc_unlock(sc);
}
STATIC int
mvneta_mediachange(struct ifnet *ifp)
{
	struct mvneta_softc *sc;

	sc = ifp->if_softc;

	if (!sc->phy_attached && !sc->use_inband_status) {
		/* We shouldn't be here */
		if_printf(ifp, "Cannot change media in fixed-link mode!\n");
		return (0);
	}

	if (sc->use_inband_status) {
		mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
		return (0);
	}

	mvneta_sc_lock(sc);

	/* Update PHY */
	mii_mediachg(sc->mii);

	mvneta_sc_unlock(sc);

	return (0);
}

STATIC void
mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
{
	uint32_t psr;

	psr = MVNETA_READ(sc, MVNETA_PSR);

	/* Speed */
	if (psr & MVNETA_PSR_GMIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
	else if (psr & MVNETA_PSR_MIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
	else if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);

	/* Duplex */
	if (psr & MVNETA_PSR_FULLDX)
		ifmr->ifm_active |= IFM_FDX;

	/* Link */
	ifmr->ifm_status = IFM_AVALID;
	if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_status |= IFM_ACTIVE;
}

STATIC void
mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mvneta_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	if (!sc->phy_attached && !sc->use_inband_status) {
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
		return;
	}

	mvneta_sc_lock(sc);

	if (sc->use_inband_status) {
		mvneta_get_media(sc, ifmr);
		mvneta_sc_unlock(sc);
		return;
	}

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	mvneta_sc_unlock(sc);
}
/*
 * Link State Notify
 */
STATIC void
mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
{
	uint32_t reg;

	if (enable) {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
		    MVNETA_PANC_ANFCEN);
		reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
		    MVNETA_PANC_INBANDANEN;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);

		reg = MVNETA_READ(sc, MVNETA_PMACC2);
		reg |= MVNETA_PMACC2_INBANDANMODE;
		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
		reg |= MVNETA_PSOMSCD_ENABLE;
		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
	} else {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
		    MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
		    MVNETA_PANC_INBANDANEN);
		MVNETA_WRITE(sc, MVNETA_PANC, reg);

		reg = MVNETA_READ(sc, MVNETA_PMACC2);
		reg &= ~MVNETA_PMACC2_INBANDANMODE;
		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
		reg &= ~MVNETA_PSOMSCD_ENABLE;
		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
	}
}
STATIC int
mvneta_update_media(struct mvneta_softc *sc, int media)
{
	uint32_t reg;
	int err;
	boolean_t running;

	err = 0;

	mvneta_sc_lock(sc);

	mvneta_linkreset(sc);

	running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	if (running)
		mvneta_stop_locked(sc);

	sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);

	if (sc->use_inband_status)
		mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);

	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	if (IFM_SUBTYPE(media) != IFM_AUTO) {
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(media) == IFM_1000_T ||
		    IFM_SUBTYPE(media) == IFM_2500_T) {
			if ((media & IFM_FDX) == 0) {
				device_printf(sc->dev,
				    "%s half-duplex unsupported\n",
				    IFM_SUBTYPE(media) == IFM_1000_T ?
				    "1000baseT" :
				    "2500baseT");
				err = EINVAL;
				goto out;
			}
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
out:
	if (running)
		mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);
	return (err);
}

STATIC void
mvneta_adjust_link(struct mvneta_softc *sc)
{
	boolean_t phy_linkup;
	uint32_t reg;

	/* Update eee/fc */
	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	/* Check for link change */
	phy_linkup = (sc->mii->mii_media_status &
	    (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);

	if (sc->linkup != phy_linkup)
		mvneta_linkupdate(sc, phy_linkup);

	/* Don't update media on disabled link */
	if (!phy_linkup)
		return;

	/* Check for media type change */
	if (sc->mvneta_media != sc->mii->mii_media_active) {
		sc->mvneta_media = sc->mii->mii_media_active;

		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
		    IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (sc->mvneta_media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
}
2538 mvneta_link_isr(struct mvneta_softc *sc)
2544 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2545 if (sc->linkup == linkup)
2551 mvneta_linkdown(sc);
2554 device_printf(sc->dev,
2555 "%s: link %s\n", sc->ifp->if_xname, linkup ? "up" : "down");
2560 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2568 mvneta_linkdown(sc);
2571 device_printf(sc->dev,
2572 "%s: link %s\n", sc->ifp->if_xname, linkup ? "up" : "down");
2577 mvneta_update_eee(struct mvneta_softc *sc)
2583 /* set EEE parameters */
2584 reg = MVNETA_READ(sc, MVNETA_LPIC1);
2586 reg |= MVNETA_LPIC1_LPIRE;
2588 reg &= ~MVNETA_LPIC1_LPIRE;
2589 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
2593 mvneta_update_fc(struct mvneta_softc *sc)
2599 reg = MVNETA_READ(sc, MVNETA_PANC);
2601 /* Flow control negotiation */
2602 reg |= MVNETA_PANC_PAUSEADV;
2603 reg |= MVNETA_PANC_ANFCEN;
2605 /* Disable flow control negotiation */
2606 reg &= ~MVNETA_PANC_PAUSEADV;
2607 reg &= ~MVNETA_PANC_ANFCEN;
2610 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2614 mvneta_linkup(struct mvneta_softc *sc)
2620 if (!sc->use_inband_status) {
2621 reg = MVNETA_READ(sc, MVNETA_PANC);
2622 reg |= MVNETA_PANC_FORCELINKPASS;
2623 reg &= ~MVNETA_PANC_FORCELINKFAIL;
2624 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2627 mvneta_qflush(sc->ifp);
2630 if_link_state_change(sc->ifp, LINK_STATE_UP);
2634 mvneta_linkdown(struct mvneta_softc *sc)
2640 if (!sc->use_inband_status) {
2641 reg = MVNETA_READ(sc, MVNETA_PANC);
2642 reg &= ~MVNETA_PANC_FORCELINKPASS;
2643 reg |= MVNETA_PANC_FORCELINKFAIL;
2644 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2647 mvneta_portdown(sc);
2648 mvneta_qflush(sc->ifp);
2650 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2654 mvneta_linkreset(struct mvneta_softc *sc)
2656 struct mii_softc *mii;
2658 if (sc->phy_attached) {
2659 /* Force reset PHY */
2660 mii = LIST_FIRST(&sc->mii->mii_phys);
2670 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2673 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2674 struct mbuf *mtmp, *mbuf;
2675 struct mvneta_tx_ring *tx;
2676 struct mvneta_buf *txbuf;
2677 struct mvneta_tx_desc *t;
2679 int start, used, error, i, txnsegs;
2682 tx = MVNETA_TX_RING(sc, q);
2683 DASSERT(tx->used >= 0);
2684 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2688 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2689 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2695 mbuf->m_flags &= ~M_VLANTAG;
2699 if (__predict_false(mbuf->m_next != NULL &&
2700 (mbuf->m_pkthdr.csum_flags &
2701 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2702 if (M_WRITABLE(mbuf) == 0) {
2703 mtmp = m_dup(mbuf, M_NOWAIT);
2710 *mbufp = mbuf = mtmp;
2714 /* load mbuf using dmamap of 1st descriptor */
2715 txbuf = &tx->txbuf[tx->cpu];
2716 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2717 txbuf->dmap, mbuf, txsegs, &txnsegs,
2719 if (__predict_false(error != 0)) {
2721 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
2723 /* This is the only recoverable error (except EFBIG). */
2724 if (error != ENOMEM) {
2733 if (__predict_false(txnsegs <= 0
2734 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2735 /* not enough descriptors, or the mbuf chain is broken */
2737 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2738 ifp->if_xname, q, txnsegs);
2740 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2743 DASSERT(txbuf->m == NULL);
2745 /* remember mbuf using 1st descriptor */
2747 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2748 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2750 /* load to tx descriptors */
2753 for (i = 0; i < txnsegs; i++) {
2754 t = &tx->desc[tx->cpu];
2758 if (__predict_true(i == 0)) {
2759 /* 1st descriptor */
2760 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2761 t->command |= MVNETA_TX_CMD_F;
2762 mvneta_tx_set_csumflag(ifp, t, mbuf);
2764 t->bufptr_pa = txsegs[i].ds_addr;
2765 t->bytecnt = txsegs[i].ds_len;
2766 tx->cpu = tx_counter_adv(tx->cpu, 1);
2771 /* t is last descriptor here */
2773 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2775 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2776 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2778 while (__predict_false(used > 255)) {
2779 ptxsu = MVNETA_PTXSU_NOWD(255);
2780 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2783 if (__predict_true(used > 0)) {
2784 ptxsu = MVNETA_PTXSU_NOWD(used);
2785 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
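#if 0
/*
 * Illustrative sketch only (not compiled): the NOWD field written above
 * appears to be 8 bits wide, which is why pending-descriptor updates are
 * chunked into writes of at most 255. Under that assumption the pattern
 * could be factored into a helper; mvneta_ptxsu_add_nowd() is a
 * hypothetical name, not part of this driver.
 */
STATIC __inline void
mvneta_ptxsu_add_nowd(struct mvneta_softc *sc, int q, int ndesc)
{

	while (ndesc > 255) {
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), MVNETA_PTXSU_NOWD(255));
		ndesc -= 255;
	}
	if (ndesc > 0)
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), MVNETA_PTXSU_NOWD(ndesc));
}
#endif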
2791 mvneta_tx_set_csumflag(struct ifnet *ifp,
2792 struct mvneta_tx_desc *t, struct mbuf *m)
2794 struct ether_header *eh;
2796 uint32_t iphl, ipoff;
2800 csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
2801 eh = mtod(m, struct ether_header *);
2803 switch (ntohs(eh->ether_type)) {
2805 ipoff = ETHER_HDR_LEN;
2807 case ETHERTYPE_VLAN:
2808 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2814 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2815 ip = (struct ip *)(m->m_data + ipoff);
2816 iphl = ip->ip_hl<<2;
2817 t->command |= MVNETA_TX_CMD_L3_IP4;
2819 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2825 if (csum_flags & CSUM_IP) {
2826 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2830 if (csum_flags & CSUM_IP_TCP) {
2831 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2832 t->command |= MVNETA_TX_CMD_L4_TCP;
2833 } else if (csum_flags & CSUM_IP_UDP) {
2834 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2835 t->command |= MVNETA_TX_CMD_L4_UDP;
2837 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2840 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2841 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
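/*
 * Worked example: a TCP/IPv4 frame queued with CSUM_IP | CSUM_IP_TCP in
 * its mbuf (and those flags advertised in if_hwassist) leaves this
 * function with MVNETA_TX_CMD_L3_IP4, MVNETA_TX_CMD_IP4_CHECKSUM,
 * MVNETA_TX_CMD_L4_TCP and MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG set in
 * t->command, together with the IP header length and L3 offset fields.
 */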
2845 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2847 struct mvneta_tx_ring *tx;
2848 struct mvneta_buf *txbuf;
2849 struct mvneta_tx_desc *t;
2850 uint32_t ptxs, ptxsu, ndesc;
2853 KASSERT_TX_MTX(sc, q);
2855 tx = MVNETA_TX_RING(sc, q);
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2859 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2860 ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2862 if (__predict_false(ndesc == 0)) {
2864 tx->queue_status = MVNETA_QUEUE_IDLE;
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2867 tx->queue_hung = TRUE;
2872 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2873 sc->ifp->if_xname, q, ndesc);
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2877 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2879 for (i = 0; i < ndesc; i++) {
2880 t = &tx->desc[tx->dma];
2882 if (t->flags & MVNETA_TX_F_ES)
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2884 sc->ifp->if_xname, q, tx->dma);
2886 txbuf = &tx->txbuf[tx->dma];
2887 if (__predict_true(txbuf->m != NULL)) {
2888 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2889 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2894 DASSERT((t->command & MVNETA_TX_CMD_F) == 0);
2895 tx->dma = tx_counter_adv(tx->dma, 1);
2898 DASSERT(tx->used >= 0);
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
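/*
 * As with the NOWD updates in mvneta_tx_queue(), the NORB field below is
 * presumably only 8 bits wide, so released buffers are reported in
 * chunks of at most 255.
 */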
2900 while (__predict_false(ndesc > 255)) {
2901 ptxsu = MVNETA_PTXSU_NORB(255);
2902 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2905 if (__predict_true(ndesc > 0)) {
2906 ptxsu = MVNETA_PTXSU_NORB(ndesc);
2907 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2910 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2911 sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
2914 tx->watchdog_time = ticks;
2917 tx->queue_status = MVNETA_QUEUE_IDLE;
2921 * Do a final TX complete when TX is idle.
2924 mvneta_tx_drain(struct mvneta_softc *sc)
2926 struct mvneta_tx_ring *tx;
2930 * Handle trailing mbuf on TX queue.
2931 * The check is done locklessly to avoid TX path contention.
2933 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2934 tx = MVNETA_TX_RING(sc, q);
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2937 mvneta_tx_lockq(sc, q);
2938 mvneta_tx_queue_complete(sc, q);
2939 mvneta_tx_unlockq(sc, q);
2948 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2950 uint32_t prxs, npkt;
2954 mvneta_rx_lockq(sc, q);
2955 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2956 npkt = MVNETA_PRXS_GET_ODC(prxs);
2957 if (__predict_false(npkt == 0))
2960 if (count > 0 && npkt > count) {
2964 mvneta_rx_queue(sc, q, npkt);
2966 mvneta_rx_unlockq(sc, q);
2971 * Helper routine for updating PRXSU register of a given queue.
2972 * Handles a number of processed descriptors larger than the maximum the register field accepts.
2974 STATIC __inline void
2975 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2979 while (__predict_false(processed > 255)) {
2980 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2981 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2984 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2985 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
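/*
 * Example: after processing 600 descriptors this helper issues two
 * writes of 255 followed by one of 90 (600 = 255 + 255 + 90).
 */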
2988 static __inline void
2989 mvneta_prefetch(void *p)
2992 __builtin_prefetch(p);
2996 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
2999 struct mvneta_rx_ring *rx;
3000 struct mvneta_rx_desc *r;
3001 struct mvneta_buf *rxbuf;
3003 struct lro_ctrl *lro;
3004 struct lro_entry *queued;
3006 int i, pktlen, processed, ndma;
3008 KASSERT_RX_MTX(sc, q);
3011 rx = MVNETA_RX_RING(sc, q);
3014 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3017 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3018 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3020 for (i = 0; i < npkt; i++) {
3021 /* Prefetch next desc, rxbuf. */
3022 ndma = rx_counter_adv(rx->dma, 1);
3023 mvneta_prefetch(&rx->desc[ndma]);
3024 mvneta_prefetch(&rx->rxbuf[ndma]);
3026 /* get descriptor and packet */
3027 r = &rx->desc[rx->dma];
3028 rxbuf = &rx->rxbuf[rx->dma];
3032 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3033 BUS_DMASYNC_POSTREAD);
3034 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3035 /* Prefetch mbuf header. */
3039 /* Drop the descriptor on error status, or if the frame is not contained in a single buffer. */
3040 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3041 (MVNETA_RX_F|MVNETA_RX_L));
3042 if (__predict_false((r->status & MVNETA_RX_ES) ||
3043 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3044 (MVNETA_RX_F|MVNETA_RX_L)))
3048 * [ OFF | MH | PKT | CRC ]
3049 * bytecnt covers MH, PKT and CRC
3051 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3052 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3053 MVNETA_HWHEADER_SIZE;
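/*
 * Worked example (MVNETA_HWHEADER_SIZE assumed to be 2 here): for a
 * descriptor reporting bytecnt = 64, the payload handed to the stack is
 * pktlen = 64 - 4 (CRC) - 2 (Marvell header) = 58 bytes, and pktbuf
 * points just past the packet offset and the hardware header.
 */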
3055 /* Prefetch mbuf data. */
3056 mvneta_prefetch(pktbuf);
3058 /* Write value to mbuf (avoid read). */
3060 m->m_len = m->m_pkthdr.len = pktlen;
3061 m->m_pkthdr.rcvif = ifp;
3062 mvneta_rx_set_csumflag(ifp, r, m);
3064 /* Increase rx_dma before releasing the lock. */
3067 if (__predict_false(rx->lro_enabled &&
3068 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3069 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3070 (m->m_pkthdr.csum_flags &
3071 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3072 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3073 if (rx->lro.lro_cnt != 0) {
3074 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3079 mvneta_rx_unlockq(sc, q);
3080 (*ifp->if_input)(ifp, m);
3081 mvneta_rx_lockq(sc, q);
3083 * Check whether this queue has been disabled in the
3084 * meantime. If yes, then clear LRO and exit.
3086 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3089 /* Refresh receive ring to avoid stall and minimize jitter. */
3090 if (processed >= MVNETA_RX_REFILL_COUNT) {
3091 mvneta_prxsu_update(sc, q, processed);
3092 mvneta_rx_queue_refill(sc, q);
3099 /* Refresh receive ring to avoid stall and minimize jitter. */
3100 if (processed >= MVNETA_RX_REFILL_COUNT) {
3101 mvneta_prxsu_update(sc, q, processed);
3102 mvneta_rx_queue_refill(sc, q);
3107 CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
3109 /* DMA status update */
3110 mvneta_prxsu_update(sc, q, processed);
3111 /* Refill the remaining buffers, if any. */
3112 mvneta_rx_queue_refill(sc, q);
3116 * Flush any outstanding LRO work
3119 while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
3120 LIST_REMOVE(queued, next);
3121 tcp_lro_flush(lro, queued);
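/*
 * This is the conventional lro_ctrl drain idiom: every entry still on
 * the lro_active list is removed and handed to tcp_lro_flush().
 */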
3126 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3129 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3130 /* This will remove all data at once */
3135 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3137 struct mvneta_rx_ring *rx;
3138 struct mvneta_rx_desc *r;
3139 struct mvneta_buf *rxbuf;
3140 bus_dma_segment_t segs;
3142 uint32_t prxs, prxsu, ndesc;
3143 int npkt, refill, nsegs, error;
3145 KASSERT_RX_MTX(sc, q);
3147 rx = MVNETA_RX_RING(sc, q);
3148 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3149 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3150 refill = MVNETA_RX_RING_CNT - ndesc;
3152 CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
3155 if (__predict_false(refill <= 0))
3158 for (npkt = 0; npkt < refill; npkt++) {
3159 rxbuf = &rx->rxbuf[rx->cpu];
3160 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
3161 if (__predict_false(m == NULL)) {
3165 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3167 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3168 m, &segs, &nsegs, BUS_DMA_NOWAIT);
3169 if (__predict_false(error != 0 || nsegs != 1)) {
3170 KASSERT(0, ("Failed to load Rx mbuf DMA map"));
3175 /* Add the packet to the ring */
3177 r = &rx->desc[rx->cpu];
3178 r->bufptr_pa = segs.ds_addr;
3179 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3181 rx->cpu = rx_counter_adv(rx->cpu, 1);
3184 if (refill == MVNETA_RX_RING_CNT)
3185 rx->needs_refill = TRUE;
3189 rx->needs_refill = FALSE;
3190 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3192 while (__predict_false(npkt > 255)) {
3193 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3194 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3197 if (__predict_true(npkt > 0)) {
3198 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3199 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3203 STATIC __inline void
3204 mvneta_rx_set_csumflag(struct ifnet *ifp,
3205 struct mvneta_rx_desc *r, struct mbuf *m)
3207 uint32_t csum_flags;
3210 if (__predict_false((r->status &
3211 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
3212 return; /* not an IP packet */
3215 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3216 MVNETA_RX_IP_HEADER_OK))
3217 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3219 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3220 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3222 switch (r->status & MVNETA_RX_L4_MASK) {
3223 case MVNETA_RX_L4_TCP:
3224 case MVNETA_RX_L4_UDP:
3225 csum_flags |= CSUM_L4_CALC;
3226 if (__predict_true((r->status &
3227 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3228 csum_flags |= CSUM_L4_VALID;
3229 m->m_pkthdr.csum_data = htons(0xffff);
3232 case MVNETA_RX_L4_OTH:
3237 m->m_pkthdr.csum_flags = csum_flags;
3241 * MAC address filter
3244 mvneta_filter_setup(struct mvneta_softc *sc)
3247 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3253 memset(dfut, 0, sizeof(dfut));
3254 memset(dfsmt, 0, sizeof(dfsmt));
3255 memset(dfomt, 0, sizeof(dfomt));
3258 ifp->if_flags |= IFF_ALLMULTI;
3259 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
3260 for (i = 0; i < MVNETA_NDFSMT; i++) {
3261 dfsmt[i] = dfomt[i] =
3262 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3263 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3264 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3265 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3269 pxc = MVNETA_READ(sc, MVNETA_PXC);
3270 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3271 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3272 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3273 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3274 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3275 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3276 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3277 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3278 if (ifp->if_flags & IFF_BROADCAST) {
3279 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3281 if (ifp->if_flags & IFF_PROMISC) {
3282 pxc |= MVNETA_PXC_UPM;
3284 MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3286 /* Set Destination Address Filter Unicast Table */
3287 if (ifp->if_flags & IFF_PROMISC) {
3288 /* pass all unicast addresses */
3289 for (i = 0; i < MVNETA_NDFUT; i++) {
3291 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3292 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3293 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3294 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3297 i = sc->enaddr[5] & 0xf; /* last nibble */
3298 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
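/*
 * Example: for a station address ending in 0x2a the last nibble is 0xa,
 * so the address is admitted via entry 2 (0xa & 3) of word 2 (0xa >> 2)
 * of the unicast filter table.
 */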
3300 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3302 /* Set Destination Address Filter Multicast Tables */
3303 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3304 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
3311 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3313 struct mvneta_sysctl_mib *arg;
3314 struct mvneta_softc *sc;
3317 arg = (struct mvneta_sysctl_mib *)arg1;
3324 if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
3329 mvneta_sc_unlock(sc);
3330 return sysctl_handle_64(oidp, &val, 0, req);
3335 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3337 struct mvneta_softc *sc;
3341 sc = (struct mvneta_softc *)arg1;
3345 err = sysctl_handle_int(oidp, &val, 0, req);
3349 if (val < 0 || val > 1)
3354 mvneta_clear_mib(sc);
3355 mvneta_sc_unlock(sc);
3362 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3364 struct mvneta_sysctl_queue *arg;
3365 struct mvneta_rx_ring *rx;
3366 struct mvneta_softc *sc;
3367 uint32_t reg, time_mvtclk;
3371 arg = (struct mvneta_sysctl_queue *)arg1;
3374 if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
3376 if (arg->rxtx != MVNETA_SYSCTL_RX)
3383 /* read the current interrupt threshold time */
3385 mvneta_rx_lockq(sc, arg->queue);
3386 rx = MVNETA_RX_RING(sc, arg->queue);
3387 time_mvtclk = rx->queue_th_time;
3388 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvneta_get_clk();
3389 mvneta_rx_unlockq(sc, arg->queue);
3390 mvneta_sc_unlock(sc);
3392 err = sysctl_handle_int(oidp, &time_us, 0, req);
3397 mvneta_rx_lockq(sc, arg->queue);
3399 /* update the interrupt threshold time (valid range: 0 - 1 sec) */
3400 if (time_us < 0 || time_us > (1000 * 1000)) {
3401 mvneta_rx_unlockq(sc, arg->queue);
3402 mvneta_sc_unlock(sc);
3406 (uint64_t)mvneta_get_clk() * (uint64_t)time_us / (1000ULL * 1000ULL);
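/*
 * Worked example (assuming a 250 MHz TCLK, cf. A3700_TCLK_250MHZ): a
 * threshold_timer_us value of 100 converts to
 * 250000000 * 100 / 1000000 = 25000 TCLK ticks before being programmed
 * into MVNETA_PRXITTH below.
 */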
3407 rx->queue_th_time = time_mvtclk;
3408 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3409 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3410 mvneta_rx_unlockq(sc, arg->queue);
3411 mvneta_sc_unlock(sc);
3417 sysctl_mvneta_init(struct mvneta_softc *sc)
3419 struct sysctl_ctx_list *ctx;
3420 struct sysctl_oid_list *children;
3421 struct sysctl_oid_list *rxchildren;
3422 struct sysctl_oid_list *qchildren, *mchildren;
3423 struct sysctl_oid *tree;
3425 struct mvneta_sysctl_queue *rxarg;
3426 #define MVNETA_SYSCTL_NAME(num) "queue" # num
3427 static const char *sysctl_queue_names[] = {
3428 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3429 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3430 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3431 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3433 #undef MVNETA_SYSCTL_NAME
3435 #ifndef NO_SYSCTL_DESCR
3436 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3437 static const char *sysctl_queue_descrs[] = {
3438 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3439 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3440 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3441 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3443 #undef MVNETA_SYSCTL_DESCR
3447 ctx = device_get_sysctl_ctx(sc->dev);
3448 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3450 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3451 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
3452 rxchildren = SYSCTL_CHILDREN(tree);
3453 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3454 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
3455 mchildren = SYSCTL_CHILDREN(tree);
3458 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3459 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3460 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3461 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3466 /* dev.mvneta.[unit].mib.<mibs> */
3467 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3468 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3472 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
3473 mvneta_mib_list[i].sysctl_name,
3474 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3475 (void *)mib_arg, 0, sysctl_read_mib, "I",
3476 mvneta_mib_list[i].desc);
3478 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3479 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3480 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3481 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3482 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3483 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3485 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3486 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3487 (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");
3489 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3490 rxarg = &sc->sysctl_rx_queue[q];
3494 rxarg->rxtx = MVNETA_SYSCTL_RX;
3496 /* dev.mvneta.[unit].rx.[queue] */
3497 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3498 sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
3499 sysctl_queue_descrs[q]);
3500 qchildren = SYSCTL_CHILDREN(tree);
3502 /* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
3503 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3504 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
3505 sysctl_set_queue_rxthtime, "I",
3506 "interrupt coalescing threshold timer [us]");
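/*
 * Note: once registered, the node above can be tuned at runtime with
 * sysctl(8), e.g. something like
 * "sysctl dev.mvneta.0.rx.queue0.threshold_timer_us=100" (the exact OID
 * prefix depends on how the device attaches).
 */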
3514 mvneta_read_mib(struct mvneta_softc *sc, int index)
3516 struct mvneta_mib_def *mib;
3519 mib = &mvneta_mib_list[index];
3520 val = MVNETA_READ_MIB(sc, mib->regnum);
3522 val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
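/*
 * Note: for counters wider than 32 bits the hardware splits the value
 * across two consecutive registers; the low word is read first and the
 * high word at regnum + 4 is then folded into the upper half.
 */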
3527 mvneta_clear_mib(struct mvneta_softc *sc)
3533 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3534 (void)mvneta_read_mib(sc, i);
3535 sc->sysctl_mib[i].counter = 0;
3537 MVNETA_READ(sc, MVNETA_PDFC);
3538 sc->counter_pdfc = 0;
3539 MVNETA_READ(sc, MVNETA_POFC);
3540 sc->counter_pofc = 0;
3541 sc->counter_watchdog = 0;
3545 mvneta_update_mib(struct mvneta_softc *sc)
3547 struct mvneta_tx_ring *tx;
3552 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3554 val = mvneta_read_mib(sc, i);
3558 sc->sysctl_mib[i].counter += val;
3559 switch (mvneta_mib_list[i].regnum) {
3560 case MVNETA_MIB_RX_GOOD_OCT:
3561 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3563 case MVNETA_MIB_RX_BAD_FRAME:
3564 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3566 case MVNETA_MIB_RX_GOOD_FRAME:
3567 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3569 case MVNETA_MIB_RX_MCAST_FRAME:
3570 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3572 case MVNETA_MIB_TX_GOOD_OCT:
3573 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3575 case MVNETA_MIB_TX_GOOD_FRAME:
3576 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3578 case MVNETA_MIB_TX_MCAST_FRAME:
3579 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3581 case MVNETA_MIB_MAC_COL:
3582 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3584 case MVNETA_MIB_TX_MAC_TRNS_ERR:
3585 case MVNETA_MIB_TX_EXCES_COL:
3586 case MVNETA_MIB_MAC_LATE_COL:
3587 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3592 reg = MVNETA_READ(sc, MVNETA_PDFC);
3593 sc->counter_pdfc += reg;
3594 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3595 reg = MVNETA_READ(sc, MVNETA_POFC);
3596 sc->counter_pofc += reg;
3597 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3600 if (sc->counter_watchdog_mib > 0) {
3601 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3602 sc->counter_watchdog_mib = 0;
3606 * We do not take queue locks so as not to disrupt the TX path.
3607 * We may miss at most one drv error, which will be picked up at
3608 * the next MIB update. We may also clear the counter while the TX
3609 * path is incrementing it, but since we only clear it when it was
3610 * non-zero, we lose at most one error.
3612 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3613 tx = MVNETA_TX_RING(sc, i);
3615 if (tx->drv_error > 0) {
3616 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);