2 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written consent.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "bxe_stats.h"
/*
 * Host word size used by bxe_hilo() to decide how to assemble 64-bit
 * hi/lo statistic pairs.
 * NOTE(review): the #if/#else preprocessor guards that select between
 * these two definitions are elided in this excerpt — as shown, the
 * second define would redefine the first; confirm against full source.
 */
#define BITS_PER_LONG 32
#define BITS_PER_LONG 64
/*
 * Assemble a 64-bit statistic from a {hi, lo} pair of 32-bit words.
 * 'hiref' points at the high word; the low word immediately follows it.
 * NOTE(review): the opening brace, the load of the high word, and the
 * 32-bit (#else) path are elided in this excerpt.
 */
bxe_hilo(uint32_t *hiref)
uint32_t lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
return (HILO_U64(hi, lo));
/*
 * Return the length (in 32-bit words, per the DBASSERT below) of the
 * port statistics region to DMA to/from MCP shmem.  Prefers the size
 * advertised by shmem2, capped at sizeof(struct host_port_stats) so a
 * newer bootcode cannot overflow the host buffer; otherwise falls back
 * to the legacy fixed layout, optionally extended with PFC counters.
 * NOTE(review): some interior lines (declarations, braces, return) are
 * elided in this excerpt.
 */
static inline uint16_t
bxe_get_port_stats_dma_len(struct bxe_softc *sc)
/* 'newest' convention - shmem2 contains the size of the port stats */
if (SHMEM2_HAS(sc, sizeof_port_stats)) {
size = SHMEM2_RD(sc, sizeof_port_stats);
/* prevent newer BC from causing buffer overflow */
if (res > sizeof(struct host_port_stats)) {
res = sizeof(struct host_port_stats);
/*
 * Older convention - all BCs support the port stats fields up until
 * the 'not_used' field
 */
res = (offsetof(struct host_port_stats, not_used) + 4);
/* if PFC stats are supported by the MFW, DMA them as well */
if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
/* sanity: result must fit within two maximum-size DMAE reads */
DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
98 * Init service functions
/*
 * Debug-dump the firmware statistics request: the ramrod header fields
 * followed by each per-queue query entry (kind/index/funcID/address).
 * NOTE(review): the BLOGD call sites and the leading format-string
 * lines that frame these argument lists are elided in this excerpt.
 */
bxe_dp_stats(struct bxe_softc *sc)
" drv_stats_counter %d\n"
" stats_counters_addrs %x %x\n",
sc->fw_stats_req->hdr.cmd_num,
sc->fw_stats_req->hdr.reserved0,
sc->fw_stats_req->hdr.drv_stats_counter,
sc->fw_stats_req->hdr.reserved1,
sc->fw_stats_req->hdr.stats_counters_addrs.hi,
sc->fw_stats_req->hdr.stats_counters_addrs.lo);
/* one entry per command in the request */
for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
sc->fw_stats_req->query[i].kind,
sc->fw_stats_req->query[i].index,
sc->fw_stats_req->query[i].funcID,
sc->fw_stats_req->query[i].reserved,
sc->fw_stats_req->query[i].address.hi,
sc->fw_stats_req->query[i].address.lo);
/*
 * Post the next statistics ramrod. Protect it with the lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * posted in that exact order).
 * NOTE(review): some interior lines (lock acquisition, braces, return)
 * are elided in this excerpt.
 */
bxe_storm_stats_post(struct bxe_softc *sc)
if (!sc->stats_pending) {
/* re-checked under the stats lock: bail if a ramrod is already in flight */
if (sc->stats_pending) {
BXE_STATS_UNLOCK(sc);
/* stamp this request with the next sequence number (LE for firmware) */
sc->fw_stats_req->hdr.drv_stats_counter =
htole16(sc->stats_counter++);
"sending statistics ramrod %d\n",
le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
/* adjust the ramrod to include VF queues statistics */
// XXX bxe_iov_adjust_stats_req(sc);
/* send FW stats ramrod */
rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
U64_HI(sc->fw_stats_req_mapping),
U64_LO(sc->fw_stats_req_mapping),
NONE_CONNECTION_TYPE);
sc->stats_pending = 1;
BXE_STATS_UNLOCK(sc);
/*
 * Launch the queued hardware (DMAE) statistics transfers.  If multiple
 * DMAE commands were queued (sc->executer_idx != 0), a "loader" command
 * copies the queued commands into the DMAE command memory and chains
 * them; otherwise a single command is posted directly.
 * NOTE(review): some interior lines (declarations, braces, early
 * returns) are elided in this excerpt.
 */
bxe_hw_stats_post(struct bxe_softc *sc)
struct dmae_command *dmae = &sc->stats_dmae;
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* completion sentinel: DMAE write-back clears it when done */
*stats_comp = DMAE_COMP_VAL;
if (CHIP_REV_IS_SLOW(sc)) {
/* Update MCP's statistics if possible */
memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
sizeof(sc->func_stats));
if (sc->executer_idx) {
loader_idx = PMF_DMAE_C(sc);
opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
TRUE, DMAE_COMP_GRC);
opcode = bxe_dmae_opcode_clr_src_reset(opcode);
/* loader: DMA the queued dmae[] array into DMAE command memory */
memset(dmae, 0, sizeof(struct dmae_command));
dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
sizeof(struct dmae_command) *
(loader_idx + 1)) >> 2);
dmae->dst_addr_hi = 0;
dmae->len = sizeof(struct dmae_command) >> 2;
if (CHIP_IS_E1(sc)) {
/* completion triggers the next chained DMAE channel */
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
dmae->comp_addr_hi = 0;
bxe_post_dmae(sc, dmae, loader_idx);
} else if (sc->func_stx) {
bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
/*
 * Poll for DMAE statistics completion: spin until the completion
 * sentinel written by bxe_hw_stats_post() is cleared, logging an error
 * on timeout.
 * NOTE(review): the loop body (delay/decrement) and return are elided
 * in this excerpt.
 */
bxe_stats_comp(struct bxe_softc *sc)
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
while (*stats_comp != DMAE_COMP_VAL) {
BLOGE(sc, "Timeout waiting for stats finished\n");
255 * Statistics service functions
/*
 * On becoming PMF, read the current port statistics out of the GRC
 * (sc->port.port_stx) into the host port_stats buffer using two DMAE
 * reads: a first maximum-length chunk completed via GRC, then the
 * remainder completed via a PCI write to stats_comp.
 * NOTE(review): some interior lines (braces, early return, the final
 * wait/launch sequence) are elided in this excerpt.
 */
bxe_stats_pmf_update(struct bxe_softc *sc)
struct dmae_command *dmae;
int loader_idx = PMF_DMAE_C(sc);
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* only the PMF with a valid port stats address does this */
if (!sc->port.pmf || !sc->port.port_stx) {
sc->executer_idx = 0;
opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
/* chunk 1: first DMAE_LEN32_RD_MAX words, GRC completion */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
dmae->src_addr_lo = (sc->port.port_stx >> 2);
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
dmae->len = DMAE_LEN32_RD_MAX;
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
/* chunk 2: remainder, completion written to stats_comp over PCI */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
DMAE_LEN32_RD_MAX * 4);
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
DMAE_LEN32_RD_MAX * 4);
dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
bxe_hw_stats_post(sc);
/*
 * Build the full chain of DMAE commands that refreshes hardware
 * statistics for this port: write host port/function stats out to the
 * MCP (PCI->GRC), then read MAC statistics (EMAC, BMAC1/BMAC2, or
 * MSTAT depending on the active MAC type) and NIG statistics into host
 * buffers (GRC->PCI).  Only runs on the PMF while the link is up.
 * The final command completes via a PCI write so bxe_stats_comp() can
 * poll for the whole chain.
 * NOTE(review): many interior lines (declarations, braces, lvalues of
 * several assignments, case breaks) are elided in this excerpt.
 */
bxe_port_stats_init(struct bxe_softc *sc)
struct dmae_command *dmae;
int port = SC_PORT(sc);
int loader_idx = PMF_DMAE_C(sc);
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* stats only flow on the PMF when the link is up */
if (!sc->link_vars.link_up || !sc->port.pmf) {
sc->executer_idx = 0;
/* phase 1: host -> MCP (PCI to GRC) */
opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
TRUE, DMAE_COMP_GRC);
if (sc->port.port_stx) {
/* push host port stats to the MCP's port_stx area */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
dmae->dst_addr_lo = sc->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
dmae->len = bxe_get_port_stats_dma_len(sc);
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
/* push host function stats to the MCP's func_stx area */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
dmae->dst_addr_lo = (sc->func_stx >> 2);
dmae->dst_addr_hi = 0;
dmae->len = (sizeof(struct host_func_stats) >> 2);
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
/* phase 2: MAC stats, hardware -> host (GRC to PCI) */
opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
TRUE, DMAE_COMP_GRC);
/* EMAC is special */
if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
/* EMAC_REG_EMAC_RX_STAT_AC_28 */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
offsetof(struct emac_stats,
rx_stat_falsecarriererrors));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
offsetof(struct emac_stats,
rx_stat_falsecarriererrors));
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
offsetof(struct emac_stats,
tx_stat_ifhcoutoctets));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
offsetof(struct emac_stats,
tx_stat_ifhcoutoctets));
dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
uint32_t tx_src_addr_lo, rx_src_addr_lo;
uint16_t rx_len, tx_len;
/* configure the params according to MAC type */
switch (sc->link_vars.mac_type) {
case ELINK_MAC_TYPE_BMAC:
mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
BIGMAC_REGISTER_TX_STAT_GTBYT */
if (CHIP_IS_E1x(sc)) {
/* BMAC1 (E1/E1H) register window */
((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
/* BMAC2 (E2) register window */
((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
(sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
(sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
/* TX stats */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_lo = tx_src_addr_lo;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
/* RX stats, placed just past the TX block in mac_stats */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_hi = 0;
dmae->src_addr_lo = rx_src_addr_lo;
U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
/* NIG egress packet counters (not present on E3) */
if (!CHIP_IS_E3(sc)) {
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
(port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
offsetof(struct nig_stats,
egress_mac_pkt0_lo));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
offsetof(struct nig_stats,
egress_mac_pkt0_lo));
dmae->len = ((2 * sizeof(uint32_t)) >> 2);
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = opcode;
(port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
offsetof(struct nig_stats,
egress_mac_pkt1_lo));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
offsetof(struct nig_stats,
egress_mac_pkt1_lo));
dmae->len = ((2 * sizeof(uint32_t)) >> 2);
dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
dmae->comp_addr_hi = 0;
/* final command: BRB discard counters; PCI completion ends the chain */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
TRUE, DMAE_COMP_PCI);
(port ? NIG_REG_STAT1_BRB_DISCARD :
NIG_REG_STAT0_BRB_DISCARD) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
/*
 * Set up a single DMAE command that writes the host function statistics
 * to the MCP's func_stx area, completing via a PCI write to stats_comp.
 * NOTE(review): some interior lines (braces, guard on sc->func_stx,
 * return) are elided in this excerpt.
 */
bxe_func_stats_init(struct bxe_softc *sc)
struct dmae_command *dmae = &sc->stats_dmae;
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
sc->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
TRUE, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
dmae->dst_addr_lo = (sc->func_stx >> 2);
dmae->dst_addr_hi = 0;
dmae->len = (sizeof(struct host_func_stats) >> 2);
dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
/*
 * STARTED-state entry of the statistics FSM: (re)build the DMAE chain
 * (port chain on the PMF, function-only otherwise) and kick off both
 * the hardware DMAE transfers and the firmware statistics ramrod.
 * NOTE(review): the PMF check and braces are elided in this excerpt.
 */
bxe_stats_start(struct bxe_softc *sc)
/*
 * VFs travel through here as part of the statistics FSM, but no action
 * is taken for them here.
 */
bxe_port_stats_init(sc);
else if (sc->func_stx) {
bxe_func_stats_init(sc);
bxe_hw_stats_post(sc);
bxe_storm_stats_post(sc);
/*
 * PMF_START event handler: pull the current port statistics from the
 * MCP (see bxe_stats_pmf_update) before normal stats flow resumes.
 * NOTE(review): surrounding braces/additional calls are elided in this
 * excerpt.
 */
bxe_stats_pmf_start(struct bxe_softc *sc)
bxe_stats_pmf_update(sc);
/*
 * RESTART event handler of the statistics FSM.
 * NOTE(review): the function body is almost entirely elided in this
 * excerpt; only the leading comment fragment is visible.
 */
bxe_stats_restart(struct bxe_softc *sc)
/*
 * VFs travel through here as part of the statistics FSM, but no action
 * is taken for them here.
 */
/*
 * Fold freshly-DMAed BMAC hardware counters into the host port stats
 * (pstats->mac_stx[1]) and derive the pause/PFC totals in eth_stats.
 * BMAC1 (E1x) and BMAC2 layouts differ, so each branch binds "new" to
 * its own struct type for the UPDATE_STAT64 macros; only BMAC2 exposes
 * per-priority (PFC) frame counters.
 * NOTE(review): some interior lines (braces, the E1x/else split) are
 * elided in this excerpt.
 */
bxe_bmac_stats_update(struct bxe_softc *sc)
struct host_port_stats *pstats = BXE_SP(sc, port_stats);
struct bxe_eth_stats *estats = &sc->eth_stats;
if (CHIP_IS_E1x(sc)) {
struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);
/* the macros below will use "bmac1_stats" type */
UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
UPDATE_STAT64(tx_stat_gt127,
tx_stat_etherstatspkts65octetsto127octets);
UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets);
UPDATE_STAT64(tx_stat_gt511,
tx_stat_etherstatspkts256octetsto511octets);
UPDATE_STAT64(tx_stat_gt1023,
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
/* the macros below will use "bmac2_stats" type */
UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
UPDATE_STAT64(tx_stat_gt127,
tx_stat_etherstatspkts65octetsto127octets);
UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets);
UPDATE_STAT64(tx_stat_gt511,
tx_stat_etherstatspkts256octetsto511octets);
UPDATE_STAT64(tx_stat_gt1023,
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
/* collect PFC stats */
pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
/* mirror accumulated pause/PFC counters into the driver eth stats */
estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * Fold freshly-DMAed MSTAT counters (UMAC/XMAC on E3 chips) into the
 * host port stats and derive eth_stats aggregates.  MSTAT hardware
 * counters clear on read, so ADD_STAT64 accumulates rather than
 * replaces; the "over 1522 octets" total is synthesized from the
 * 2047/4095/9216/16383 buckets.
 * NOTE(review): some interior lines (braces) are elided in this
 * excerpt.
 */
bxe_mstat_stats_update(struct bxe_softc *sc)
struct host_port_stats *pstats = BXE_SP(sc, port_stats);
struct bxe_eth_stats *estats = &sc->eth_stats;
struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);
ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
/* collect pfc stats */
ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
ADD_STAT64(stats_tx.tx_gt1023,
tx_stat_etherstatspkts512octetsto1023octets);
ADD_STAT64(stats_tx.tx_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
estats->etherstatspkts1024octetsto1522octets_hi =
pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
estats->etherstatspkts1024octetsto1522octets_lo =
pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
/* "over 1522" = sum of the 2047, 4095, 9216 and 16383 buckets */
estats->etherstatspktsover1522octets_hi =
pstats->mac_stx[1].tx_stat_mac_2047_hi;
estats->etherstatspktsover1522octets_lo =
pstats->mac_stx[1].tx_stat_mac_2047_lo;
ADD_64(estats->etherstatspktsover1522octets_hi,
pstats->mac_stx[1].tx_stat_mac_4095_hi,
estats->etherstatspktsover1522octets_lo,
pstats->mac_stx[1].tx_stat_mac_4095_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
pstats->mac_stx[1].tx_stat_mac_9216_hi,
estats->etherstatspktsover1522octets_lo,
pstats->mac_stx[1].tx_stat_mac_9216_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
pstats->mac_stx[1].tx_stat_mac_16383_hi,
estats->etherstatspktsover1522octets_lo,
pstats->mac_stx[1].tx_stat_mac_16383_lo);
estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * Fold freshly-DMAed EMAC (1Gb MAC) counters into the host port stats
 * via UPDATE_EXTEND_STAT, then derive eth_stats pause totals: received
 * pause = XON + XOFF received; sent pause = XON + XOFF sent.
 * NOTE(review): closing braces are elided in this excerpt.
 */
bxe_emac_stats_update(struct bxe_softc *sc)
struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
struct host_port_stats *pstats = BXE_SP(sc, port_stats);
struct bxe_eth_stats *estats = &sc->eth_stats;
UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
UPDATE_EXTEND_STAT(tx_stat_outxonsent);
UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = XON received + XOFF received */
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
estats->pause_frames_received_lo =
pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
ADD_64(estats->pause_frames_received_hi,
pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
estats->pause_frames_received_lo,
pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = XON sent + XOFF sent */
estats->pause_frames_sent_hi =
pstats->mac_stx[1].tx_stat_outxonsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxonsent_lo;
ADD_64(estats->pause_frames_sent_hi,
pstats->mac_stx[1].tx_stat_outxoffsent_hi,
estats->pause_frames_sent_lo,
pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/*
 * Process the hardware statistics just read by DMAE: dispatch to the
 * MAC-type-specific updater, fold in NIG BRB discard/truncate deltas,
 * snapshot NIG counters for next time, mirror the accumulated MAC
 * stats into eth_stats, and pick up E3 EEE and MCP NIG-timer values.
 * NOTE(review): some interior lines (braces, break statements, error
 * returns) are elided in this excerpt.
 */
bxe_hw_stats_update(struct bxe_softc *sc)
struct nig_stats *new = BXE_SP(sc, nig_stats);
struct nig_stats *old = &(sc->port.old_nig_stats);
struct host_port_stats *pstats = BXE_SP(sc, port_stats);
struct bxe_eth_stats *estats = &sc->eth_stats;
uint32_t lpi_reg, nig_timer_max;
switch (sc->link_vars.mac_type) {
case ELINK_MAC_TYPE_BMAC:
bxe_bmac_stats_update(sc);
case ELINK_MAC_TYPE_EMAC:
bxe_emac_stats_update(sc);
case ELINK_MAC_TYPE_UMAC:
case ELINK_MAC_TYPE_XMAC:
bxe_mstat_stats_update(sc);
case ELINK_MAC_TYPE_NONE: /* unreached */
"stats updated by DMAE but no MAC active\n");
default: /* unreached */
BLOGE(sc, "stats update failed, unknown MAC type\n");
/* accumulate NIG BRB deltas since the previous snapshot */
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
new->brb_discard - old->brb_discard);
ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
new->brb_truncate - old->brb_truncate);
if (!CHIP_IS_E3(sc)) {
UPDATE_STAT64_NIG(egress_mac_pkt0,
etherstatspkts1024octetsto1522octets);
UPDATE_STAT64_NIG(egress_mac_pkt1,
etherstatspktsover1522octets);
/* remember this NIG snapshot as the baseline for the next delta */
memcpy(old, new, sizeof(struct nig_stats));
memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
pstats->host_port_stats_counter++;
if (CHIP_IS_E3(sc)) {
/* E3: accumulate EEE TX low-power-idle entry count */
lpi_reg = (SC_PORT(sc)) ?
MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
if (!BXE_NOMCP(sc)) {
nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
if (nig_timer_max != estats->nig_timer_max) {
estats->nig_timer_max = nig_timer_max;
BLOGE(sc, "invalid NIG timer max (%u)\n",
estats->nig_timer_max);
/*
 * Verify that every storm (X/U/C/T) has stamped the shared statistics
 * buffer with the sequence number of the last posted ramrod; a
 * mismatch means that storm's data is stale and the update must be
 * skipped.
 * NOTE(review): lock acquisition, braces and the error/success returns
 * are elided in this excerpt.
 */
bxe_storm_stats_validate_counters(struct bxe_softc *sc)
struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
uint16_t cur_stats_counter;
/*
 * Make sure we use the value of the counter
 * used for sending the last stats ramrod.
 */
cur_stats_counter = (sc->stats_counter - 1);
BXE_STATS_UNLOCK(sc);
/* are storm stats valid? */
if (le16toh(counters->xstats_counter) != cur_stats_counter) {
"stats not updated by xstorm, "
"counter 0x%x != stats_counter 0x%x\n",
le16toh(counters->xstats_counter), sc->stats_counter);
if (le16toh(counters->ustats_counter) != cur_stats_counter) {
"stats not updated by ustorm, "
"counter 0x%x != stats_counter 0x%x\n",
le16toh(counters->ustats_counter), sc->stats_counter);
if (le16toh(counters->cstats_counter) != cur_stats_counter) {
"stats not updated by cstorm, "
"counter 0x%x != stats_counter 0x%x\n",
le16toh(counters->cstats_counter), sc->stats_counter);
if (le16toh(counters->tstats_counter) != cur_stats_counter) {
"stats not updated by tstorm, "
"counter 0x%x != stats_counter 0x%x\n",
le16toh(counters->tstats_counter), sc->stats_counter);
975 bxe_storm_stats_update(struct bxe_softc *sc)
977 struct tstorm_per_port_stats *tport =
978 &sc->fw_stats_data->port.tstorm_port_statistics;
979 struct tstorm_per_pf_stats *tfunc =
980 &sc->fw_stats_data->pf.tstorm_pf_statistics;
981 struct host_func_stats *fstats = &sc->func_stats;
982 struct bxe_eth_stats *estats = &sc->eth_stats;
983 struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
986 /* vfs stat counter is managed by pf */
987 if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
991 estats->error_bytes_received_hi = 0;
992 estats->error_bytes_received_lo = 0;
994 for (i = 0; i < sc->num_queues; i++) {
995 struct bxe_fastpath *fp = &sc->fp[i];
996 struct tstorm_per_queue_stats *tclient =
997 &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
998 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
999 struct ustorm_per_queue_stats *uclient =
1000 &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
1001 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
1002 struct xstorm_per_queue_stats *xclient =
1003 &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
1004 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
1005 struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
1006 struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1010 BLOGD(sc, DBG_STATS,
1011 "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
1012 i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
1013 xclient->mcast_pkts_sent);
1015 BLOGD(sc, DBG_STATS, "---------------\n");
1017 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
1018 total_broadcast_bytes_received);
1019 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
1020 total_multicast_bytes_received);
1021 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
1022 total_unicast_bytes_received);
1025 * sum to total_bytes_received all
1026 * unicast/multicast/broadcast
1028 qstats->total_bytes_received_hi =
1029 qstats->total_broadcast_bytes_received_hi;
1030 qstats->total_bytes_received_lo =
1031 qstats->total_broadcast_bytes_received_lo;
1033 ADD_64(qstats->total_bytes_received_hi,
1034 qstats->total_multicast_bytes_received_hi,
1035 qstats->total_bytes_received_lo,
1036 qstats->total_multicast_bytes_received_lo);
1038 ADD_64(qstats->total_bytes_received_hi,
1039 qstats->total_unicast_bytes_received_hi,
1040 qstats->total_bytes_received_lo,
1041 qstats->total_unicast_bytes_received_lo);
1043 qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
1044 qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
1046 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
1047 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
1048 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
1049 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
1050 etherstatsoverrsizepkts, 32);
1051 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
1053 SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
1054 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
1055 total_multicast_packets_received);
1056 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
1057 total_broadcast_packets_received);
1058 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
1059 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
1060 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
1062 UPDATE_QSTAT(xclient->bcast_bytes_sent,
1063 total_broadcast_bytes_transmitted);
1064 UPDATE_QSTAT(xclient->mcast_bytes_sent,
1065 total_multicast_bytes_transmitted);
1066 UPDATE_QSTAT(xclient->ucast_bytes_sent,
1067 total_unicast_bytes_transmitted);
1070 * sum to total_bytes_transmitted all
1071 * unicast/multicast/broadcast
1073 qstats->total_bytes_transmitted_hi =
1074 qstats->total_unicast_bytes_transmitted_hi;
1075 qstats->total_bytes_transmitted_lo =
1076 qstats->total_unicast_bytes_transmitted_lo;
1078 ADD_64(qstats->total_bytes_transmitted_hi,
1079 qstats->total_broadcast_bytes_transmitted_hi,
1080 qstats->total_bytes_transmitted_lo,
1081 qstats->total_broadcast_bytes_transmitted_lo);
1083 ADD_64(qstats->total_bytes_transmitted_hi,
1084 qstats->total_multicast_bytes_transmitted_hi,
1085 qstats->total_bytes_transmitted_lo,
1086 qstats->total_multicast_bytes_transmitted_lo);
1088 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
1089 total_unicast_packets_transmitted);
1090 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1091 total_multicast_packets_transmitted);
1092 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1093 total_broadcast_packets_transmitted);
1095 UPDATE_EXTEND_TSTAT(checksum_discard,
1096 total_packets_received_checksum_discarded);
1097 UPDATE_EXTEND_TSTAT(ttl0_discard,
1098 total_packets_received_ttl0_discarded);
1100 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1101 total_transmitted_dropped_packets_error);
1103 /* TPA aggregations completed */
1104 UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
1105 /* Number of network frames aggregated by TPA */
1106 UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
1107 /* Total number of bytes in completed TPA aggregations */
1108 UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
1110 UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
1112 UPDATE_FSTAT_QSTAT(total_bytes_received);
1113 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1114 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1115 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1116 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1117 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1118 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1119 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1120 UPDATE_FSTAT_QSTAT(valid_bytes_received);
1123 ADD_64(estats->total_bytes_received_hi,
1124 estats->rx_stat_ifhcinbadoctets_hi,
1125 estats->total_bytes_received_lo,
1126 estats->rx_stat_ifhcinbadoctets_lo);
1128 ADD_64_LE(estats->total_bytes_received_hi,
1129 tfunc->rcv_error_bytes.hi,
1130 estats->total_bytes_received_lo,
1131 tfunc->rcv_error_bytes.lo);
1133 ADD_64_LE(estats->error_bytes_received_hi,
1134 tfunc->rcv_error_bytes.hi,
1135 estats->error_bytes_received_lo,
1136 tfunc->rcv_error_bytes.lo);
1138 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1140 ADD_64(estats->error_bytes_received_hi,
1141 estats->rx_stat_ifhcinbadoctets_hi,
1142 estats->error_bytes_received_lo,
1143 estats->rx_stat_ifhcinbadoctets_lo);
1146 struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1147 UPDATE_FW_STAT(mac_filter_discard);
1148 UPDATE_FW_STAT(mf_tag_discard);
1149 UPDATE_FW_STAT(brb_truncate_discard);
1150 UPDATE_FW_STAT(mac_discard);
1153 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1155 sc->stats_pending = 0;
/*
 * bxe_net_stats_update
 *
 * Fold the driver's accumulated 64-bit hi/lo statistics (sc->eth_stats)
 * into the ifnet if_data counters so the network stack and userland
 * tools (netstat) see current values.
 *
 * NOTE(review): this extraction has elided lines (embedded original line
 * numbers are non-contiguous); declarations of 'i' and the 'tmp'
 * accumulator, plus some braces, are not visible here.
 */
1161 bxe_net_stats_update(struct bxe_softc *sc)
1163     struct bxe_eth_stats *estats = &sc->eth_stats;
1164     struct ifnet *ifnet = sc->ifnet;
/* input packets = unicast + multicast + broadcast received */
1168     ifnet->if_data.ifi_ipackets =
1169         bxe_hilo(&estats->total_unicast_packets_received_hi) +
1170         bxe_hilo(&estats->total_multicast_packets_received_hi) +
1171         bxe_hilo(&estats->total_broadcast_packets_received_hi);
/* output packets = unicast + multicast + broadcast transmitted */
1173     ifnet->if_data.ifi_opackets =
1174         bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
1175         bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
1176         bxe_hilo(&estats->total_broadcast_packets_transmitted_hi);
1178     ifnet->if_data.ifi_ibytes = bxe_hilo(&estats->total_bytes_received_hi);
1180     ifnet->if_data.ifi_obytes = bxe_hilo(&estats->total_bytes_transmitted_hi);
/*
 * Sum per-queue checksum discards (little-endian firmware counters)
 * into the input-queue-drop counter.
 */
1183     for (i = 0; i < sc->num_queues; i++) {
1184         struct tstorm_per_queue_stats *old_tclient =
1185             &sc->fp[i].old_tclient;
1186         tmp += le32toh(old_tclient->checksum_discard);
1189     ifnet->if_data.ifi_iqdrops = tmp;
/* input errors: runt/oversize frames, BRB drops/truncates, FCS,
 * alignment errors and no-buffer discards */
1191     ifnet->if_data.ifi_ierrors =
1192         bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1193         bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
1194         bxe_hilo(&estats->brb_drop_hi) +
1195         bxe_hilo(&estats->brb_truncate_hi) +
1196         bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
1197         bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
1198         bxe_hilo(&estats->no_buff_discard_hi);
1200     ifnet->if_data.ifi_oerrors =
1201         bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
1202         bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1204     ifnet->if_data.ifi_imcasts =
1205         bxe_hilo(&estats->total_multicast_packets_received_hi);
1207     ifnet->if_data.ifi_collisions =
1208         bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
1209         bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1210         bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
/*
 * bxe_drv_stats_update
 *
 * Aggregate the software (driver-maintained) per-queue statistics of
 * every fastpath queue into the device-wide eth_stats via the
 * UPDATE_ESTAT_QSTAT macro (qstats/qstats_old/estats are the names the
 * macro expands against).
 */
1214 bxe_drv_stats_update(struct bxe_softc *sc)
1216     struct bxe_eth_stats *estats = &sc->eth_stats;
1219     for (i = 0; i < sc->num_queues; i++) {
1220         struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1221         struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
/* rx path software counters */
1223         UPDATE_ESTAT_QSTAT(rx_calls);
1224         UPDATE_ESTAT_QSTAT(rx_pkts);
1225         UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
1226         UPDATE_ESTAT_QSTAT(rx_soft_errors);
1227         UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1228         UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1229         UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1230         UPDATE_ESTAT_QSTAT(rx_budget_reached);
/* tx path software counters */
1231         UPDATE_ESTAT_QSTAT(tx_pkts);
1232         UPDATE_ESTAT_QSTAT(tx_soft_errors);
1233         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1234         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1235         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1236         UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
1237         UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
1238         UPDATE_ESTAT_QSTAT(tx_encap_failures);
1239         UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1240         UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1241         UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1242         UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1243         UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1244         UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
1245         //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
1246         //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
1247         UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1248         UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1249         UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1251         /* mbuf driver statistics */
1252         UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1253         UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1254         UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1255         UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1256         UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
1257         UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
1258         UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
1259         UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);
1261         /* track the number of allocated mbufs */
1262         UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1263         UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
1264         UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
1265         UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
/*
 * bxe_edebug_stats_stopped
 *
 * Check the shared-memory emulation/debug driver interface for a
 * "disable statistics" opcode. Visible logic: if SHMEM2 exposes
 * edebug_driver_if[1] and it reads EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT,
 * stats collection should be suppressed (return values are in elided
 * lines — presumably TRUE in that case, FALSE otherwise; verify against
 * the full source).
 */
1270 bxe_edebug_stats_stopped(struct bxe_softc *sc)
1274     if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1275         val = SHMEM2_RD(sc, edebug_driver_if[1]);
1277         if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
/*
 * bxe_stats_update
 *
 * Periodic statistics refresh (the UPDATE event handler of the stats
 * state machine): verify the previous DMAE completion, pull HW and
 * storm (firmware) statistics, publish them to the ifnet counters and
 * driver counters, then post the next HW/storm requests.
 *
 * NOTE(review): surrounding control flow (PF/VF split, early returns)
 * is partially elided in this extraction.
 */
1286 bxe_stats_update(struct bxe_softc *sc)
1288     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* e-debug interface may have disabled stats entirely */
1290     if (bxe_edebug_stats_stopped(sc)) {
/* previous DMAE transaction must have completed before reading */
1295     if (*stats_comp != DMAE_COMP_VAL) {
1300     bxe_hw_stats_update(sc);
/* storm stats lag HW stats; panic only after repeated failures */
1303     if (bxe_storm_stats_update(sc)) {
1304         if (sc->stats_pending++ == 3) {
1305             bxe_panic(sc, ("storm stats not updated for 3 times\n"));
1311  * VF doesn't collect HW statistics, and doesn't get completions,
1312  * performs only update.
1314     bxe_storm_stats_update(sc);
/* publish refreshed numbers */
1317     bxe_net_stats_update(sc);
1318     bxe_drv_stats_update(sc);
/* kick off the next collection cycle */
1325     bxe_hw_stats_post(sc);
1326     bxe_storm_stats_post(sc);
1330 bxe_port_stats_stop(struct bxe_softc *sc)
1332 struct dmae_command *dmae;
1334 int loader_idx = PMF_DMAE_C(sc);
1335 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
1337 sc->executer_idx = 0;
1339 opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
1341 if (sc->port.port_stx) {
1342 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
1345 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1347 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1350 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
1351 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
1352 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1353 dmae->dst_addr_hi = 0;
1354 dmae->len = bxe_get_port_stats_dma_len(sc);
1356 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1357 dmae->comp_addr_hi = 0;
1360 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1361 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1362 dmae->comp_val = DMAE_COMP_VAL;
1369 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
1370 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1371 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
1372 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
1373 dmae->dst_addr_lo = (sc->func_stx >> 2);
1374 dmae->dst_addr_hi = 0;
1375 dmae->len = (sizeof(struct host_func_stats) >> 2);
1376 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1377 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1378 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bxe_stats_stop (STOP event handler of the stats state machine)
 *
 * Take one final snapshot of HW and storm statistics, push the
 * refreshed numbers to the ifnet counters, and (per visible calls)
 * write the last port stats back to firmware before posting the
 * final HW stats request. Conditions guarding these calls are elided
 * in this extraction.
 */
1385 bxe_stats_stop(struct bxe_softc *sc)
1387     uint8_t update = FALSE;
/* 'update' records whether any final snapshot succeeded */
1392     update = bxe_hw_stats_update(sc) == 0;
1395     update |= bxe_storm_stats_update(sc) == 0;
1398     bxe_net_stats_update(sc);
1401     bxe_port_stats_stop(sc);
1404     bxe_hw_stats_post(sc);
/* No-op action for state-machine transitions with nothing to do. */
1410 bxe_stats_do_nothing(struct bxe_softc *sc)
/*
 * Statistics state machine: indexed by [current state][event], each
 * entry gives the action to run and the next state. States are
 * STATS_STATE_DISABLED / STATS_STATE_ENABLED; events are PMF, LINK_UP,
 * UPDATE and STOP (see bxe_stats_handle()).
 */
1415 static const struct {
1416     void (*action)(struct bxe_softc *sc);
1417     enum bxe_stats_state next_state;
1418 } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1420     /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
1421     /* LINK_UP */ { bxe_stats_start, STATS_STATE_ENABLED },
1422     /* UPDATE */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
1423     /* STOP */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
1426     /* ENABLED PMF */ { bxe_stats_pmf_start, STATS_STATE_ENABLED },
1427     /* LINK_UP */ { bxe_stats_restart, STATS_STATE_ENABLED },
1428     /* UPDATE */ { bxe_stats_update, STATS_STATE_ENABLED },
1429     /* STOP */ { bxe_stats_stop, STATS_STATE_DISABLED }
/*
 * bxe_stats_handle
 *
 * Public entry point driving the stats state machine: atomically read
 * the current state and advance to the next state (under BXE_STATS
 * lock, whose acquisition is elided in this extraction), then run the
 * transition's action outside the lock. Skips entirely when the device
 * has panicked.
 */
1433 void bxe_stats_handle(struct bxe_softc *sc,
1434                       enum bxe_stats_event event)
1436     enum bxe_stats_state state;
1438     if (__predict_false(sc->panic)) {
1443     state = sc->stats_state;
1444     sc->stats_state = bxe_stats_stm[state][event].next_state;
1445     BXE_STATS_UNLOCK(sc);
/* action runs after the state transition is published */
1447     bxe_stats_stm[state][event].action(sc);
/* UPDATE fires periodically; avoid flooding the log with it */
1449     if (event != STATS_EVENT_UPDATE) {
1450         BLOGD(sc, DBG_STATS,
1451               "state %d -> event %d -> state %d\n",
1452               state, event, sc->stats_state);
/*
 * bxe_port_stats_base_init
 *
 * One-time (PMF-only) DMAE transfer of the base port statistics from
 * host slowpath memory to the management firmware port_stx area, with a
 * PCI-side completion written to stats_comp. Requires both PMF role and
 * a valid port_stx address.
 */
1457 bxe_port_stats_base_init(struct bxe_softc *sc)
1459     struct dmae_command *dmae;
1460     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* sanity: only the PMF with a firmware stats area may run this */
1463     if (!sc->port.pmf || !sc->port.port_stx) {
1464         BLOGE(sc, "BUG!\n");
1468     sc->executer_idx = 0;
1470     dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
1471     dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1472                                    TRUE, DMAE_COMP_PCI);
1473     dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
1474     dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
1475     dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1476     dmae->dst_addr_hi = 0;
1477     dmae->len = bxe_get_port_stats_dma_len(sc);
1478     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1479     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1480     dmae->comp_val = DMAE_COMP_VAL;
1483     bxe_hw_stats_post(sc);
1488  * This function will prepare the statistics ramrod data the way
1489  * we will only have to increment the statistics counter and
1490  * send the ramrod each time we have to.
1493 bxe_prep_fw_stats_req(struct bxe_softc *sc)
1496     int first_queue_query_index;
1497     struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1498     bus_addr_t cur_data_offset;
1499     struct stats_query_entry *cur_query_entry;
/* header: total number of query entries; counter starts at zero */
1501     stats_hdr->cmd_num = sc->fw_stats_num;
1502     stats_hdr->drv_stats_counter = 0;
1505  * The storm_counters struct contains the counters of completed
1506  * statistics requests per storm which are incremented by FW
1507  * each time it completes handling a statistics ramrod. We will
1508  * check these counters in the timer handler and discard a
1509  * (statistics) ramrod completion.
1511     cur_data_offset = (sc->fw_stats_data_mapping +
1512                        offsetof(struct bxe_fw_stats_data, storm_counters));
/* firmware expects little-endian DMA addresses */
1514     stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1515     stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1518  * Prepare the first stats ramrod (will be completed with
1519  * the counters equal to zero) - init counters to something different.
1521     memset(&sc->fw_stats_data->storm_counters, 0xff,
1522            sizeof(struct stats_counter));
1524     /**** Port FW statistics data ****/
1525     cur_data_offset = (sc->fw_stats_data_mapping +
1526                        offsetof(struct bxe_fw_stats_data, port));
1528     cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];
1530     cur_query_entry->kind = STATS_TYPE_PORT;
1531     /* For port query index is a DONT CARE */
1532     cur_query_entry->index = SC_PORT(sc);
1533     /* For port query funcID is a DONT CARE */
1534     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1535     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1536     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1538     /**** PF FW statistics data ****/
1539     cur_data_offset = (sc->fw_stats_data_mapping +
1540                        offsetof(struct bxe_fw_stats_data, pf));
1542     cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];
1544     cur_query_entry->kind = STATS_TYPE_PF;
1545     /* For PF query index is a DONT CARE */
1546     cur_query_entry->index = SC_PORT(sc);
1547     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1548     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1549     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1552     /**** FCoE FW statistics data ****/
1554     cur_data_offset = (sc->fw_stats_data_mapping +
1555                        offsetof(struct bxe_fw_stats_data, fcoe));
1557     cur_query_entry = &sc->fw_stats_req->query[BXE_FCOE_QUERY_IDX];
1559     cur_query_entry->kind = STATS_TYPE_FCOE;
1560     /* For FCoE query index is a DONT CARE */
1561     cur_query_entry->index = SC_PORT(sc);
/* NOTE(review): uses cpu_to_le16 while the rest uses htole16 —
 * same conversion, inconsistent spelling (likely Linux heritage). */
1562     cur_query_entry->funcID = cpu_to_le16(SC_FUNC(sc));
1563     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1564     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1568     /**** Clients' queries ****/
1569     cur_data_offset = (sc->fw_stats_data_mapping +
1570                        offsetof(struct bxe_fw_stats_data, queue_stats));
1573  * First queue query index depends whether FCoE offloaded request will
1574  * be included in the ramrod
1578     first_queue_query_index = BXE_FIRST_QUEUE_QUERY_IDX;
1581     first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);
/* one STATS_TYPE_QUEUE entry per L2 fastpath queue */
1583     for (i = 0; i < sc->num_queues; i++) {
1585             &sc->fw_stats_req->query[first_queue_query_index + i];
1587         cur_query_entry->kind = STATS_TYPE_QUEUE;
1588         cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
1589         cur_query_entry->funcID = htole16(SC_FUNC(sc));
1590         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1591         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1593         cur_data_offset += sizeof(struct per_queue_stats);
1597     /* add FCoE queue query if needed */
1600         &sc->fw_stats_req->query[first_queue_query_index + i];
1602     cur_query_entry->kind = STATS_TYPE_QUEUE;
1603     cur_query_entry->index = bxe_stats_id(&sc->fp[FCOE_IDX(sc)]);
1604     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1605     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1606     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
/*
 * bxe_stats_init
 *
 * (Re)initialize all statistics machinery: read the management firmware
 * stats mailbox addresses, snapshot baseline NIG counters, zero the
 * per-queue storm-client shadows, prepare the firmware stats ramrod,
 * clear the ifnet counters and (on first init) all accumulated stats.
 * Ends with the state machine in STATS_STATE_DISABLED.
 */
1612 bxe_stats_init(struct bxe_softc *sc)
1614     int /*abs*/port = SC_PORT(sc);
1615     int mb_idx = SC_FW_MB_IDX(sc);
1618     sc->stats_pending = 0;
1619     sc->executer_idx = 0;
1620     sc->stats_counter = 0;
1622     /* port and func stats for management */
1623     if (!BXE_NOMCP(sc)) {
1624         sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1625         sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
/* no MCP: no firmware stats areas to write to */
1627         sc->port.port_stx = 0;
1631     BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1632           sc->port.port_stx, sc->func_stx);
1634     /* pmf should retrieve port statistics from SP on a non-init*/
1635     if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1636         bxe_stats_handle(sc, STATS_EVENT_PMF);
/* baseline NIG counters so later deltas start from current HW state */
1641     memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1642     sc->port.old_nig_stats.brb_discard =
1643         REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1644     sc->port.old_nig_stats.brb_truncate =
1645         REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1646     if (!CHIP_IS_E3(sc)) {
1647         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1648                     &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1649         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1650                     &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1653     /* function stats */
1654     for (i = 0; i < sc->num_queues; i++) {
1655         memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1656         memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1657         memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
/* first init also clears accumulated per-queue statistics */
1658         if (sc->stats_init) {
1659             memset(&sc->fp[i].eth_q_stats, 0,
1660                    sizeof(sc->fp[i].eth_q_stats));
1661             memset(&sc->fp[i].eth_q_stats_old, 0,
1662                    sizeof(sc->fp[i].eth_q_stats_old));
1666     /* prepare statistics ramrod data */
1667     bxe_prep_fw_stats_req(sc);
/* reset the counters exported through ifnet */
1669     sc->ifnet->if_data.ifi_ipackets = 0;
1670     sc->ifnet->if_data.ifi_opackets = 0;
1671     sc->ifnet->if_data.ifi_ibytes = 0;
1672     sc->ifnet->if_data.ifi_obytes = 0;
1673     sc->ifnet->if_data.ifi_ierrors = 0;
1674     sc->ifnet->if_data.ifi_oerrors = 0;
1675     sc->ifnet->if_data.ifi_imcasts = 0;
1676     sc->ifnet->if_data.ifi_collisions = 0;
1678     if (sc->stats_init) {
1679         memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1680         memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1681         memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1682         memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1683         memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1685     /* Clean SP from previous statistics */
1687         memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1688         bxe_func_stats_init(sc);
1689         bxe_hw_stats_post(sc);
1694     sc->stats_state = STATS_STATE_DISABLED;
1696     if (sc->port.pmf && sc->port.port_stx) {
1697         bxe_port_stats_base_init(sc);
1700     /* mark the end of statistics initialization */
1701     sc->stats_init = FALSE;
/*
 * bxe_save_statistics
 *
 * Snapshot current statistics into the *_old shadow structures so they
 * survive a device reset/reload and can be re-added to the fresh
 * counters afterwards (UPDATE_QSTAT_OLD / UPDATE_FW_STAT_OLD expand
 * against the qstats/qstats_old and estats/fwstats/pstats locals).
 */
1705 bxe_save_statistics(struct bxe_softc *sc)
1709     /* save queue statistics */
1710     for (i = 0; i < sc->num_queues; i++) {
1711         struct bxe_fastpath *fp = &sc->fp[i];
1712         struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
1713         struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1715         UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1716         UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1717         UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1718         UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1719         UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1720         UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1721         UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1722         UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1723         UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1724         UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1725         UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1726         UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1727         UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
1728         UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
1731     /* save net_device_stats statistics */
1732     sc->net_stats_old.rx_dropped = sc->ifnet->if_data.ifi_iqdrops;
1734     /* store port firmware statistics */
1736         struct bxe_eth_stats *estats = &sc->eth_stats;
1737         struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1738         struct host_port_stats *pstats = BXE_SP(sc, port_stats);
/* PFC frame counts come straight from the slowpath port stats */
1740         fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1741         fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1742         fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1743         fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
1746         UPDATE_FW_STAT_OLD(mac_filter_discard);
1747         UPDATE_FW_STAT_OLD(mf_tag_discard);
1748         UPDATE_FW_STAT_OLD(brb_truncate_discard);
1749         UPDATE_FW_STAT_OLD(mac_discard);
1755 bxe_afex_collect_stats(struct bxe_softc *sc,
1756 void *void_afex_stats,
1757 uint32_t stats_type)
1760 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1761 struct bxe_eth_stats *estats = &sc->eth_stats;
1763 struct per_queue_stats *fcoe_q_stats =
1764 &sc->fw_stats_data->queue_stats[FCOE_IDX(sc)];
1766 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1767 &fcoe_q_stats->tstorm_queue_statistics;
1769 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1770 &fcoe_q_stats->ustorm_queue_statistics;
1772 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1773 &fcoe_q_stats->xstorm_queue_statistics;
1775 struct fcoe_statistics_params *fw_fcoe_stat =
1776 &sc->fw_stats_data->fcoe;
1779 memset(afex_stats, 0, sizeof(struct afex_stats));
1781 for (i = 0; i < sc->num_queues; i++) {
1782 struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1784 ADD_64(afex_stats->rx_unicast_bytes_hi,
1785 qstats->total_unicast_bytes_received_hi,
1786 afex_stats->rx_unicast_bytes_lo,
1787 qstats->total_unicast_bytes_received_lo);
1789 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1790 qstats->total_broadcast_bytes_received_hi,
1791 afex_stats->rx_broadcast_bytes_lo,
1792 qstats->total_broadcast_bytes_received_lo);
1794 ADD_64(afex_stats->rx_multicast_bytes_hi,
1795 qstats->total_multicast_bytes_received_hi,
1796 afex_stats->rx_multicast_bytes_lo,
1797 qstats->total_multicast_bytes_received_lo);
1799 ADD_64(afex_stats->rx_unicast_frames_hi,
1800 qstats->total_unicast_packets_received_hi,
1801 afex_stats->rx_unicast_frames_lo,
1802 qstats->total_unicast_packets_received_lo);
1804 ADD_64(afex_stats->rx_broadcast_frames_hi,
1805 qstats->total_broadcast_packets_received_hi,
1806 afex_stats->rx_broadcast_frames_lo,
1807 qstats->total_broadcast_packets_received_lo);
1809 ADD_64(afex_stats->rx_multicast_frames_hi,
1810 qstats->total_multicast_packets_received_hi,
1811 afex_stats->rx_multicast_frames_lo,
1812 qstats->total_multicast_packets_received_lo);
1815 * sum to rx_frames_discarded all discarded
1816 * packets due to size, ttl0 and checksum
1818 ADD_64(afex_stats->rx_frames_discarded_hi,
1819 qstats->total_packets_received_checksum_discarded_hi,
1820 afex_stats->rx_frames_discarded_lo,
1821 qstats->total_packets_received_checksum_discarded_lo);
1823 ADD_64(afex_stats->rx_frames_discarded_hi,
1824 qstats->total_packets_received_ttl0_discarded_hi,
1825 afex_stats->rx_frames_discarded_lo,
1826 qstats->total_packets_received_ttl0_discarded_lo);
1828 ADD_64(afex_stats->rx_frames_discarded_hi,
1829 qstats->etherstatsoverrsizepkts_hi,
1830 afex_stats->rx_frames_discarded_lo,
1831 qstats->etherstatsoverrsizepkts_lo);
1833 ADD_64(afex_stats->rx_frames_dropped_hi,
1834 qstats->no_buff_discard_hi,
1835 afex_stats->rx_frames_dropped_lo,
1836 qstats->no_buff_discard_lo);
1838 ADD_64(afex_stats->tx_unicast_bytes_hi,
1839 qstats->total_unicast_bytes_transmitted_hi,
1840 afex_stats->tx_unicast_bytes_lo,
1841 qstats->total_unicast_bytes_transmitted_lo);
1843 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1844 qstats->total_broadcast_bytes_transmitted_hi,
1845 afex_stats->tx_broadcast_bytes_lo,
1846 qstats->total_broadcast_bytes_transmitted_lo);
1848 ADD_64(afex_stats->tx_multicast_bytes_hi,
1849 qstats->total_multicast_bytes_transmitted_hi,
1850 afex_stats->tx_multicast_bytes_lo,
1851 qstats->total_multicast_bytes_transmitted_lo);
1853 ADD_64(afex_stats->tx_unicast_frames_hi,
1854 qstats->total_unicast_packets_transmitted_hi,
1855 afex_stats->tx_unicast_frames_lo,
1856 qstats->total_unicast_packets_transmitted_lo);
1858 ADD_64(afex_stats->tx_broadcast_frames_hi,
1859 qstats->total_broadcast_packets_transmitted_hi,
1860 afex_stats->tx_broadcast_frames_lo,
1861 qstats->total_broadcast_packets_transmitted_lo);
1863 ADD_64(afex_stats->tx_multicast_frames_hi,
1864 qstats->total_multicast_packets_transmitted_hi,
1865 afex_stats->tx_multicast_frames_lo,
1866 qstats->total_multicast_packets_transmitted_lo);
1868 ADD_64(afex_stats->tx_frames_dropped_hi,
1869 qstats->total_transmitted_dropped_packets_error_hi,
1870 afex_stats->tx_frames_dropped_lo,
1871 qstats->total_transmitted_dropped_packets_error_lo);
1876 * Now add FCoE statistics which are collected separately
1877 * (both offloaded and non offloaded)
1880 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1882 afex_stats->rx_unicast_bytes_lo,
1883 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1885 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1886 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1887 afex_stats->rx_unicast_bytes_lo,
1888 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1890 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1891 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1892 afex_stats->rx_broadcast_bytes_lo,
1893 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1895 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1896 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1897 afex_stats->rx_multicast_bytes_lo,
1898 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1900 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1902 afex_stats->rx_unicast_frames_lo,
1903 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1905 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1907 afex_stats->rx_unicast_frames_lo,
1908 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1910 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1912 afex_stats->rx_broadcast_frames_lo,
1913 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1915 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1917 afex_stats->rx_multicast_frames_lo,
1918 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1920 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1922 afex_stats->rx_frames_discarded_lo,
1923 fcoe_q_tstorm_stats->checksum_discard);
1925 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1927 afex_stats->rx_frames_discarded_lo,
1928 fcoe_q_tstorm_stats->pkts_too_big_discard);
1930 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1932 afex_stats->rx_frames_discarded_lo,
1933 fcoe_q_tstorm_stats->ttl0_discard);
1935 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1937 afex_stats->rx_frames_dropped_lo,
1938 fcoe_q_tstorm_stats->no_buff_discard);
1940 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1942 afex_stats->rx_frames_dropped_lo,
1943 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1945 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1947 afex_stats->rx_frames_dropped_lo,
1948 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1950 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1952 afex_stats->rx_frames_dropped_lo,
1953 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1955 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1957 afex_stats->rx_frames_dropped_lo,
1958 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1960 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1962 afex_stats->rx_frames_dropped_lo,
1963 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1965 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1967 afex_stats->tx_unicast_bytes_lo,
1968 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1970 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1971 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1972 afex_stats->tx_unicast_bytes_lo,
1973 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1975 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1976 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1977 afex_stats->tx_broadcast_bytes_lo,
1978 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1980 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1981 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1982 afex_stats->tx_multicast_bytes_lo,
1983 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1985 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1987 afex_stats->tx_unicast_frames_lo,
1988 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1990 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1992 afex_stats->tx_unicast_frames_lo,
1993 fcoe_q_xstorm_stats->ucast_pkts_sent);
1995 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1997 afex_stats->tx_broadcast_frames_lo,
1998 fcoe_q_xstorm_stats->bcast_pkts_sent);
2000 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
2002 afex_stats->tx_multicast_frames_lo,
2003 fcoe_q_xstorm_stats->mcast_pkts_sent);
2005 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
2007 afex_stats->tx_frames_dropped_lo,
2008 fcoe_q_xstorm_stats->error_drop_pkts);
2013 * If port stats are requested, add them to the PMF
2014 * stats, as anyway they will be accumulated by the
2015 * MCP before sent to the switch
2017 if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
2018 ADD_64(afex_stats->rx_frames_dropped_hi,
2020 afex_stats->rx_frames_dropped_lo,
2021 estats->mac_filter_discard);
2022 ADD_64(afex_stats->rx_frames_dropped_hi,
2024 afex_stats->rx_frames_dropped_lo,
2025 estats->brb_truncate_discard);
2026 ADD_64(afex_stats->rx_frames_discarded_hi,
2028 afex_stats->rx_frames_discarded_lo,
2029 estats->mac_discard);