2 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written consent.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "bxe_stats.h"
41 #define BITS_PER_LONG 32
43 #define BITS_PER_LONG 64
/*
 * Fold a hi/lo pair of 32-bit statistics words into one counter value.
 * 'hiref' points at the hi word; the lo word immediately follows it.
 * On 32-bit longs only the lo word is returned (the hi word would not
 * fit), matching the upstream driver behavior.
 */
static inline uint64_t
bxe_hilo(uint32_t *hiref)
{
    uint32_t lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
    uint32_t hi = *hiref;

    return (HILO_U64(hi, lo));
#else
    return (lo);
#endif
}
58 static inline uint16_t
59 bxe_get_port_stats_dma_len(struct bxe_softc *sc)
64 /* 'newest' convention - shmem2 contains the size of the port stats */
65 if (SHMEM2_HAS(sc, sizeof_port_stats)) {
66 size = SHMEM2_RD(sc, sizeof_port_stats);
71 /* prevent newer BC from causing buffer overflow */
72 if (res > sizeof(struct host_port_stats)) {
73 res = sizeof(struct host_port_stats);
78 * Older convention - all BCs support the port stats fields up until
79 * the 'not_used' field
82 res = (offsetof(struct host_port_stats, not_used) + 4);
84 /* if PFC stats are supported by the MFW, DMA them as well */
85 if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
86 res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
87 offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
93 DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
98 * Init service functions
/*
 * bxe_dp_stats() - debug-dump the FW statistics ramrod request: the
 * request header first, then one log line per per-client query entry.
 * NOTE(review): this extraction is missing interior lines (the embedded
 * original line numbers jump); the visible statements are not contiguous.
 */
102 bxe_dp_stats(struct bxe_softc *sc)
112 " drv_stats_counter %d\n"
114 " stats_counters_addrs %x %x\n",
115 sc->fw_stats_req->hdr.cmd_num,
116 sc->fw_stats_req->hdr.reserved0,
117 sc->fw_stats_req->hdr.drv_stats_counter,
118 sc->fw_stats_req->hdr.reserved1,
119 sc->fw_stats_req->hdr.stats_counters_addrs.hi,
120 sc->fw_stats_req->hdr.stats_counters_addrs.lo);
/* dump each query entry carried by the request */
122 for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
131 sc->fw_stats_req->query[i].kind,
132 sc->fw_stats_req->query[i].index,
133 sc->fw_stats_req->query[i].funcID,
134 sc->fw_stats_req->query[i].reserved,
135 sc->fw_stats_req->query[i].address.hi,
136 sc->fw_stats_req->query[i].address.lo);
141 * Post the next statistics ramrod. Protect it with the lock in
142 * order to ensure the strict order between statistics ramrods
143 * (each ramrod has a sequence number passed in a
144 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
148 bxe_storm_stats_post(struct bxe_softc *sc)
152 if (!sc->stats_pending) {
/*
 * NOTE(review): the BXE_STATS_LOCK() acquisition line is missing from
 * this extraction; the re-check of stats_pending below is the
 * under-the-lock double check.
 */
155 if (sc->stats_pending) {
156 BXE_STATS_UNLOCK(sc);
/* stamp the ramrod with the next sequence number (little-endian for FW) */
160 sc->fw_stats_req->hdr.drv_stats_counter =
161 htole16(sc->stats_counter++);
164 "sending statistics ramrod %d\n",
165 le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
167 /* adjust the ramrod to include VF queues statistics */
168 // XXX bxe_iov_adjust_stats_req(sc);
172 /* send FW stats ramrod */
173 rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
174 U64_HI(sc->fw_stats_req_mapping),
175 U64_LO(sc->fw_stats_req_mapping),
176 NONE_CONNECTION_TYPE);
/* presumably guarded by (rc == 0) in the missing line — TODO confirm */
178 sc->stats_pending = 1;
181 BXE_STATS_UNLOCK(sc);
/*
 * bxe_hw_stats_post() - kick off the hardware statistics DMAE transfers.
 * If sub-commands were queued (executer_idx != 0) a "loader" DMAE command
 * is built that copies the queued command array into the DMAE command
 * memory and triggers it; otherwise, if only the per-function stats
 * block exists, a single pre-built command is posted directly.
 * NOTE(review): interior lines are missing from this extraction; the
 * statement order below is not the complete original sequence.
 */
186 bxe_hw_stats_post(struct bxe_softc *sc)
188 struct dmae_command *dmae = &sc->stats_dmae;
189 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
193 *stats_comp = DMAE_COMP_VAL;
194 if (CHIP_REV_IS_SLOW(sc)) {
198 /* Update MCP's statistics if possible */
200 memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
201 sizeof(sc->func_stats));
205 if (sc->executer_idx) {
206 loader_idx = PMF_DMAE_C(sc);
207 opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
208 TRUE, DMAE_COMP_GRC);
209 opcode = bxe_dmae_opcode_clr_src_reset(opcode);
/* loader command: copy the queued sub-commands into DMAE command memory */
211 memset(dmae, 0, sizeof(struct dmae_command))
212 dmae->opcode = opcode;
213 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
214 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
215 dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
216 sizeof(struct dmae_command) *
217 (loader_idx + 1)) >> 2);
218 dmae->dst_addr_hi = 0;
219 dmae->len = sizeof(struct dmae_command) >> 2;
220 if (CHIP_IS_E1(sc)) {
223 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
224 dmae->comp_addr_hi = 0;
228 bxe_post_dmae(sc, dmae, loader_idx);
229 } else if (sc->func_stx) {
231 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
236 bxe_stats_comp(struct bxe_softc *sc)
238 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
241 while (*stats_comp != DMAE_COMP_VAL) {
243 BLOGE(sc, "Timeout waiting for stats finished\n");
255 * Statistics service functions
/*
 * bxe_stats_pmf_update() - when this function becomes the Port
 * Management Function, pull the existing port statistics out of the
 * chip (GRC -> host memory) in two DMAE reads, since the block is
 * larger than a single DMAE read (DMAE_LEN32_RD_MAX dwords).
 * NOTE(review): interior lines are missing from this extraction.
 */
259 bxe_stats_pmf_update(struct bxe_softc *sc)
261 struct dmae_command *dmae;
263 int loader_idx = PMF_DMAE_C(sc);
264 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* old bootcode workaround: skip the transfer entirely */
266 if (sc->devinfo.bc_ver <= 0x06001400) {
268 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
269 * BRB registers while the BRB block is in reset. The DMA transfer
270 * below triggers this issue resulting in the DMAE to stop
271 * functioning. Skip this initial stats transfer for old bootcode
272 * versions <= 6.0.20.
278 if (!sc->port.pmf || !sc->port.port_stx) {
283 sc->executer_idx = 0;
285 opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
/* first chunk: DMAE_LEN32_RD_MAX dwords, completion via GRC */
287 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
288 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
289 dmae->src_addr_lo = (sc->port.port_stx >> 2);
290 dmae->src_addr_hi = 0;
291 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
292 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
293 dmae->len = DMAE_LEN32_RD_MAX;
294 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
295 dmae->comp_addr_hi = 0;
/* second chunk: the remainder, completion written to host (PCI) */
298 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
299 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
300 dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
301 dmae->src_addr_hi = 0;
302 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
303 DMAE_LEN32_RD_MAX * 4);
304 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
305 DMAE_LEN32_RD_MAX * 4);
306 dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
308 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
309 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
310 dmae->comp_val = DMAE_COMP_VAL;
313 bxe_hw_stats_post(sc);
/*
 * bxe_port_stats_init() - build the full per-port statistics DMAE
 * program for the PMF:
 *   1. write host port/function stats up to the MCP (PCI -> GRC),
 *   2. read the active MAC's hardware counters down to host memory
 *      (EMAC is register-banked and needs three reads; BMAC/MSTAT use
 *      one TX and one RX read each),
 *   3. read the NIG egress packet counters (not on E3) and the NIG
 *      discard counters, with the final command signalling completion
 *      to the host.
 * The commands are queued into BXE_SP dmae[] and executed later by
 * bxe_hw_stats_post().
 * NOTE(review): this extraction is missing interior lines (braces,
 * some dmae->len / src_addr assignments); statements are not contiguous.
 */
318 bxe_port_stats_init(struct bxe_softc *sc)
320 struct dmae_command *dmae;
321 int port = SC_PORT(sc);
323 int loader_idx = PMF_DMAE_C(sc);
325 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
328 if (!sc->link_vars.link_up || !sc->port.pmf) {
333 sc->executer_idx = 0;
/* MCP update: host stats -> management firmware via GRC */
336 opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
337 TRUE, DMAE_COMP_GRC);
339 if (sc->port.port_stx) {
340 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
341 dmae->opcode = opcode;
342 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
343 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
344 dmae->dst_addr_lo = sc->port.port_stx >> 2;
345 dmae->dst_addr_hi = 0;
346 dmae->len = bxe_get_port_stats_dma_len(sc);
347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
348 dmae->comp_addr_hi = 0;
/* function stats -> MCP (guard "if (sc->func_stx)" missing from chunk) */
353 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
354 dmae->opcode = opcode;
355 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
356 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
357 dmae->dst_addr_lo = (sc->func_stx >> 2);
358 dmae->dst_addr_hi = 0;
359 dmae->len = (sizeof(struct host_func_stats) >> 2);
360 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
361 dmae->comp_addr_hi = 0;
/* MAC stats: read hardware counters into host memory */
366 opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
367 TRUE, DMAE_COMP_GRC);
369 /* EMAC is special */
370 if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
371 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
373 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
374 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
375 dmae->opcode = opcode;
376 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
377 dmae->src_addr_hi = 0;
378 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
379 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
380 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
381 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
382 dmae->comp_addr_hi = 0;
385 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
386 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
387 dmae->opcode = opcode;
388 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
389 dmae->src_addr_hi = 0;
390 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
391 offsetof(struct emac_stats,
392 rx_stat_falsecarriererrors));
393 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
394 offsetof(struct emac_stats,
395 rx_stat_falsecarriererrors));
397 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
398 dmae->comp_addr_hi = 0;
401 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
402 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
403 dmae->opcode = opcode;
404 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
405 dmae->src_addr_hi = 0;
406 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
407 offsetof(struct emac_stats,
408 tx_stat_ifhcoutoctets));
409 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
410 offsetof(struct emac_stats,
411 tx_stat_ifhcoutoctets));
412 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
413 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
414 dmae->comp_addr_hi = 0;
/* non-EMAC path: BMAC (E1x/E2) or MSTAT (E3, covers UMAC/XMAC) */
417 uint32_t tx_src_addr_lo, rx_src_addr_lo;
418 uint16_t rx_len, tx_len;
420 /* configure the params according to MAC type */
421 switch (sc->link_vars.mac_type) {
422 case ELINK_MAC_TYPE_BMAC:
423 mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
424 NIG_REG_INGRESS_BMAC0_MEM;
426 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
427 BIGMAC_REGISTER_TX_STAT_GTBYT */
428 if (CHIP_IS_E1x(sc)) {
430 ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
431 tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
432 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
434 ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
435 rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
436 BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
439 ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
440 tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
441 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
443 ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
444 rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
445 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
450 case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
451 case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
453 mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
454 tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
455 rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
457 (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
459 (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
/* TX stats read */
464 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
465 dmae->opcode = opcode;
466 dmae->src_addr_lo = tx_src_addr_lo;
467 dmae->src_addr_hi = 0;
469 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
470 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
471 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
472 dmae->comp_addr_hi = 0;
/* RX stats read, placed immediately after the TX block in host memory */
476 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
477 dmae->opcode = opcode;
478 dmae->src_addr_hi = 0;
479 dmae->src_addr_lo = rx_src_addr_lo;
481 U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
483 U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
485 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
486 dmae->comp_addr_hi = 0;
/* NIG egress packet counters (the E3 NIG has no such registers) */
491 if (!CHIP_IS_E3(sc)) {
492 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
493 dmae->opcode = opcode;
495 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
496 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
497 dmae->src_addr_hi = 0;
498 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
499 offsetof(struct nig_stats,
500 egress_mac_pkt0_lo));
501 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
502 offsetof(struct nig_stats,
503 egress_mac_pkt0_lo));
504 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
505 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
506 dmae->comp_addr_hi = 0;
509 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
510 dmae->opcode = opcode;
512 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
513 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
514 dmae->src_addr_hi = 0;
515 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
516 offsetof(struct nig_stats,
517 egress_mac_pkt1_lo));
518 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
519 offsetof(struct nig_stats,
520 egress_mac_pkt1_lo));
521 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
522 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
523 dmae->comp_addr_hi = 0;
/* final command: NIG BRB discards, completion written to host memory */
527 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
528 dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
529 TRUE, DMAE_COMP_PCI);
531 (port ? NIG_REG_STAT1_BRB_DISCARD :
532 NIG_REG_STAT0_BRB_DISCARD) >> 2;
533 dmae->src_addr_hi = 0;
534 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
535 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
536 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
538 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
539 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
540 dmae->comp_val = DMAE_COMP_VAL;
546 bxe_func_stats_init(struct bxe_softc *sc)
548 struct dmae_command *dmae = &sc->stats_dmae;
549 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
557 sc->executer_idx = 0;
558 memset(dmae, 0, sizeof(struct dmae_command));
560 dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
561 TRUE, DMAE_COMP_PCI);
562 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
563 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
564 dmae->dst_addr_lo = (sc->func_stx >> 2);
565 dmae->dst_addr_hi = 0;
566 dmae->len = (sizeof(struct host_func_stats) >> 2);
567 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
568 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
569 dmae->comp_val = DMAE_COMP_VAL;
575 bxe_stats_start(struct bxe_softc *sc)
578 * VFs travel through here as part of the statistics FSM, but no action
586 bxe_port_stats_init(sc);
589 else if (sc->func_stx) {
590 bxe_func_stats_init(sc);
593 bxe_hw_stats_post(sc);
594 bxe_storm_stats_post(sc);
/*
 * PMF_START event of the statistics FSM: wait for any in-flight DMAE
 * completion, pull the existing port stats out of the chip now that we
 * are the PMF, then resume normal statistics collection.
 */
static void
bxe_stats_pmf_start(struct bxe_softc *sc)
{
    bxe_stats_comp(sc);
    bxe_stats_pmf_update(sc);
    bxe_stats_start(sc);
}
/*
 * Restart statistics collection (e.g. after a link change): wait for
 * any outstanding DMAE completion, then re-enter the start path.
 */
static void
bxe_stats_restart(struct bxe_softc *sc)
{
    /*
     * VFs travel through here as part of the statistics FSM, but no action
     * is required
     */
    if (IS_VF(sc)) {
        return;
    }

    bxe_stats_comp(sc);
    bxe_stats_start(sc);
}
/*
 * bxe_bmac_stats_update() - fold the freshly-DMAed BMAC hardware
 * counters into the host port stats (pstats) and driver ethernet stats
 * (estats).  The UPDATE_STAT64/ADD_64 macros implicitly reference the
 * local 'new'/'pstats' variables.  BMAC1 (E1x) and BMAC2 layouts share
 * field names, so the same macro sequence is repeated per type; BMAC2
 * additionally carries PFC frame counters.
 * NOTE(review): interior lines (braces, some declarations) are missing
 * from this extraction.
 */
621 bxe_bmac_stats_update(struct bxe_softc *sc)
623 struct host_port_stats *pstats = BXE_SP(sc, port_stats);
624 struct bxe_eth_stats *estats = &sc->eth_stats;
630 if (CHIP_IS_E1x(sc)) {
631 struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);
633 /* the macros below will use "bmac1_stats" type */
634 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
635 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
636 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
637 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
638 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
639 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
640 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
641 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
642 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
644 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
645 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
646 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
647 UPDATE_STAT64(tx_stat_gt127,
648 tx_stat_etherstatspkts65octetsto127octets);
649 UPDATE_STAT64(tx_stat_gt255,
650 tx_stat_etherstatspkts128octetsto255octets);
651 UPDATE_STAT64(tx_stat_gt511,
652 tx_stat_etherstatspkts256octetsto511octets);
653 UPDATE_STAT64(tx_stat_gt1023,
654 tx_stat_etherstatspkts512octetsto1023octets);
655 UPDATE_STAT64(tx_stat_gt1518,
656 tx_stat_etherstatspkts1024octetsto1522octets);
657 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
658 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
659 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
660 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
661 UPDATE_STAT64(tx_stat_gterr,
662 tx_stat_dot3statsinternalmactransmiterrors);
663 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
/* BMAC2 (E2+) branch */
665 struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
666 struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
668 /* the macros below will use "bmac2_stats" type */
669 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
670 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
671 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
672 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
673 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
674 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
675 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
676 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
677 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
678 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
679 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
680 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
681 UPDATE_STAT64(tx_stat_gt127,
682 tx_stat_etherstatspkts65octetsto127octets);
683 UPDATE_STAT64(tx_stat_gt255,
684 tx_stat_etherstatspkts128octetsto255octets);
685 UPDATE_STAT64(tx_stat_gt511,
686 tx_stat_etherstatspkts256octetsto511octets);
687 UPDATE_STAT64(tx_stat_gt1023,
688 tx_stat_etherstatspkts512octetsto1023octets);
689 UPDATE_STAT64(tx_stat_gt1518,
690 tx_stat_etherstatspkts1024octetsto1522octets);
691 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
692 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
693 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
694 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
695 UPDATE_STAT64(tx_stat_gterr,
696 tx_stat_dot3statsinternalmactransmiterrors);
697 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
699 /* collect PFC stats */
700 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
701 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
702 ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
703 pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
705 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
706 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
707 ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
708 pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
/* mirror the pause/PFC totals into the driver ethernet stats */
711 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
712 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
714 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
715 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
717 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
718 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
719 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
720 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * bxe_mstat_stats_update() - fold the MSTAT block's counters (E3:
 * UMAC/XMAC) into the host port stats and driver ethernet stats.  The
 * ADD_STAT64/ADD_64 macros implicitly use the local 'new'/'pstats'
 * variables.  MSTAT has no gt1518/gt2047 split like BMAC, so the
 * "over 1522" bucket is rebuilt by summing the 2047/4095/9216/16383
 * buckets below.
 * NOTE(review): interior lines are missing from this extraction.
 */
724 bxe_mstat_stats_update(struct bxe_softc *sc)
726 struct host_port_stats *pstats = BXE_SP(sc, port_stats);
727 struct bxe_eth_stats *estats = &sc->eth_stats;
728 struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);
730 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
731 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
732 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
733 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
734 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
735 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
736 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
737 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
738 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
739 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
741 /* collect pfc stats */
742 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
743 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
744 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
745 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
747 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
748 ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
749 ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
750 ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
751 ADD_STAT64(stats_tx.tx_gt1023,
752 tx_stat_etherstatspkts512octetsto1023octets);
753 ADD_STAT64(stats_tx.tx_gt1518,
754 tx_stat_etherstatspkts1024octetsto1522octets);
755 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
757 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
758 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
759 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
761 ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
762 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
764 estats->etherstatspkts1024octetsto1522octets_hi =
765 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
766 estats->etherstatspkts1024octetsto1522octets_lo =
767 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
/* rebuild the "over 1522 octets" bucket from the jumbo sub-buckets */
769 estats->etherstatspktsover1522octets_hi =
770 pstats->mac_stx[1].tx_stat_mac_2047_hi;
771 estats->etherstatspktsover1522octets_lo =
772 pstats->mac_stx[1].tx_stat_mac_2047_lo;
774 ADD_64(estats->etherstatspktsover1522octets_hi,
775 pstats->mac_stx[1].tx_stat_mac_4095_hi,
776 estats->etherstatspktsover1522octets_lo,
777 pstats->mac_stx[1].tx_stat_mac_4095_lo);
779 ADD_64(estats->etherstatspktsover1522octets_hi,
780 pstats->mac_stx[1].tx_stat_mac_9216_hi,
781 estats->etherstatspktsover1522octets_lo,
782 pstats->mac_stx[1].tx_stat_mac_9216_lo);
784 ADD_64(estats->etherstatspktsover1522octets_hi,
785 pstats->mac_stx[1].tx_stat_mac_16383_hi,
786 estats->etherstatspktsover1522octets_lo,
787 pstats->mac_stx[1].tx_stat_mac_16383_lo);
789 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
790 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
792 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
793 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
795 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
796 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
797 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
798 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * bxe_emac_stats_update() - extend the 32-bit EMAC hardware counters
 * (just DMAed into mac_stats.emac_stats) into the 64-bit host port
 * stats, then derive the aggregate pause-frame totals (xon + xoff) for
 * the driver ethernet stats.  UPDATE_EXTEND_STAT implicitly uses the
 * local 'new'/'pstats' variables.
 * NOTE(review): interior lines are missing from this extraction.
 */
802 bxe_emac_stats_update(struct bxe_softc *sc)
804 struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
805 struct host_port_stats *pstats = BXE_SP(sc, port_stats);
806 struct bxe_eth_stats *estats = &sc->eth_stats;
808 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
809 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
810 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
811 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
812 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
813 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
814 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
815 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
816 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
817 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
818 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
819 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
820 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
821 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
822 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
823 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
824 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
825 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
826 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
827 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
828 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
829 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
830 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
831 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
832 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
833 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
834 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
835 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
836 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
837 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
838 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause frames received = xon received + xoff received */
840 estats->pause_frames_received_hi =
841 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
842 estats->pause_frames_received_lo =
843 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
844 ADD_64(estats->pause_frames_received_hi,
845 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
846 estats->pause_frames_received_lo,
847 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause frames sent = xon sent + xoff sent */
849 estats->pause_frames_sent_hi =
850 pstats->mac_stx[1].tx_stat_outxonsent_hi;
851 estats->pause_frames_sent_lo =
852 pstats->mac_stx[1].tx_stat_outxonsent_lo;
853 ADD_64(estats->pause_frames_sent_hi,
854 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
855 estats->pause_frames_sent_lo,
856 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/*
 * bxe_hw_stats_update() - top-level hardware statistics refresh after a
 * DMAE completion: dispatch to the MAC-specific updater, fold the NIG
 * deltas (BRB discard/truncate, egress packet buckets on non-E3 chips)
 * into the port stats, snapshot the NIG block as the new "old" baseline,
 * and mirror the results into the driver ethernet stats.
 * NOTE(review): interior lines (break statements, return paths) are
 * missing from this extraction.
 */
860 bxe_hw_stats_update(struct bxe_softc *sc)
862 struct nig_stats *new = BXE_SP(sc, nig_stats);
863 struct nig_stats *old = &(sc->port.old_nig_stats);
864 struct host_port_stats *pstats = BXE_SP(sc, port_stats);
865 struct bxe_eth_stats *estats = &sc->eth_stats;
866 uint32_t lpi_reg, nig_timer_max;
872 switch (sc->link_vars.mac_type) {
873 case ELINK_MAC_TYPE_BMAC:
874 bxe_bmac_stats_update(sc);
877 case ELINK_MAC_TYPE_EMAC:
878 bxe_emac_stats_update(sc);
881 case ELINK_MAC_TYPE_UMAC:
882 case ELINK_MAC_TYPE_XMAC:
883 bxe_mstat_stats_update(sc);
886 case ELINK_MAC_TYPE_NONE: /* unreached */
888 "stats updated by DMAE but no MAC active\n");
891 default: /* unreached */
892 BLOGE(sc, "stats update failed, unknown MAC type\n");
/* fold the NIG counter deltas (new - old baseline) into 64-bit totals */
895 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
896 new->brb_discard - old->brb_discard);
897 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
898 new->brb_truncate - old->brb_truncate);
900 if (!CHIP_IS_E3(sc)) {
901 UPDATE_STAT64_NIG(egress_mac_pkt0,
902 etherstatspkts1024octetsto1522octets);
903 UPDATE_STAT64_NIG(egress_mac_pkt1,
904 etherstatspktsover1522octets);
/* snapshot current NIG block as the baseline for the next delta */
907 memcpy(old, new, sizeof(struct nig_stats));
909 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
910 sizeof(struct mac_stx));
911 estats->brb_drop_hi = pstats->brb_drop_hi;
912 estats->brb_drop_lo = pstats->brb_drop_lo;
914 pstats->host_port_stats_counter++;
/* E3: accumulate EEE TX LPI entry count from the CPMU */
916 if (CHIP_IS_E3(sc)) {
917 lpi_reg = (SC_PORT(sc)) ?
918 MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
919 MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
920 estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
923 if (!BXE_NOMCP(sc)) {
924 nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
925 if (nig_timer_max != estats->nig_timer_max) {
926 estats->nig_timer_max = nig_timer_max;
927 BLOGE(sc, "invalid NIG timer max (%u)\n",
928 estats->nig_timer_max);
/*
 * bxe_storm_stats_validate_counters() - check that every storm
 * (x/u/c/t) has stamped the shared counters block with the sequence
 * number of the last statistics ramrod we sent; a mismatch means that
 * storm has not yet written fresh statistics, and the caller should
 * retry later rather than consume stale data.
 * NOTE(review): the BXE_STATS_LOCK() acquisition and the
 * return-on-mismatch / final-return lines are missing from this
 * extraction (original numbering jumps).
 */
936 bxe_storm_stats_validate_counters(struct bxe_softc *sc)
938 struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
939 uint16_t cur_stats_counter;
942 * Make sure we use the value of the counter
943 * used for sending the last stats ramrod.
/* stats_counter was post-incremented at send time, hence the -1 */
946 cur_stats_counter = (sc->stats_counter - 1);
947 BXE_STATS_UNLOCK(sc);
949 /* are storm stats valid? */
950 if (le16toh(counters->xstats_counter) != cur_stats_counter) {
952 "stats not updated by xstorm, "
953 "counter 0x%x != stats_counter 0x%x\n",
954 le16toh(counters->xstats_counter), sc->stats_counter);
958 if (le16toh(counters->ustats_counter) != cur_stats_counter) {
960 "stats not updated by ustorm, "
961 "counter 0x%x != stats_counter 0x%x\n",
962 le16toh(counters->ustats_counter), sc->stats_counter);
966 if (le16toh(counters->cstats_counter) != cur_stats_counter) {
968 "stats not updated by cstorm, "
969 "counter 0x%x != stats_counter 0x%x\n",
970 le16toh(counters->cstats_counter), sc->stats_counter);
974 if (le16toh(counters->tstats_counter) != cur_stats_counter) {
976 "stats not updated by tstorm, "
977 "counter 0x%x != stats_counter 0x%x\n",
978 le16toh(counters->tstats_counter), sc->stats_counter);
986 bxe_storm_stats_update(struct bxe_softc *sc)
988 struct tstorm_per_port_stats *tport =
989 &sc->fw_stats_data->port.tstorm_port_statistics;
990 struct tstorm_per_pf_stats *tfunc =
991 &sc->fw_stats_data->pf.tstorm_pf_statistics;
992 struct host_func_stats *fstats = &sc->func_stats;
993 struct bxe_eth_stats *estats = &sc->eth_stats;
994 struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
997 /* vfs stat counter is managed by pf */
998 if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
1002 estats->error_bytes_received_hi = 0;
1003 estats->error_bytes_received_lo = 0;
1005 for (i = 0; i < sc->num_queues; i++) {
1006 struct bxe_fastpath *fp = &sc->fp[i];
1007 struct tstorm_per_queue_stats *tclient =
1008 &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
1009 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
1010 struct ustorm_per_queue_stats *uclient =
1011 &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
1012 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
1013 struct xstorm_per_queue_stats *xclient =
1014 &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
1015 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
1016 struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
1017 struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1021 BLOGD(sc, DBG_STATS,
1022 "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
1023 i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
1024 xclient->mcast_pkts_sent);
1026 BLOGD(sc, DBG_STATS, "---------------\n");
1028 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
1029 total_broadcast_bytes_received);
1030 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
1031 total_multicast_bytes_received);
1032 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
1033 total_unicast_bytes_received);
1036 * sum to total_bytes_received all
1037 * unicast/multicast/broadcast
1039 qstats->total_bytes_received_hi =
1040 qstats->total_broadcast_bytes_received_hi;
1041 qstats->total_bytes_received_lo =
1042 qstats->total_broadcast_bytes_received_lo;
1044 ADD_64(qstats->total_bytes_received_hi,
1045 qstats->total_multicast_bytes_received_hi,
1046 qstats->total_bytes_received_lo,
1047 qstats->total_multicast_bytes_received_lo);
1049 ADD_64(qstats->total_bytes_received_hi,
1050 qstats->total_unicast_bytes_received_hi,
1051 qstats->total_bytes_received_lo,
1052 qstats->total_unicast_bytes_received_lo);
1054 qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
1055 qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
1057 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
1058 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
1059 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
1060 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
1061 etherstatsoverrsizepkts, 32);
1062 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
1064 SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
1065 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
1066 total_multicast_packets_received);
1067 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
1068 total_broadcast_packets_received);
1069 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
1070 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
1071 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
1073 UPDATE_QSTAT(xclient->bcast_bytes_sent,
1074 total_broadcast_bytes_transmitted);
1075 UPDATE_QSTAT(xclient->mcast_bytes_sent,
1076 total_multicast_bytes_transmitted);
1077 UPDATE_QSTAT(xclient->ucast_bytes_sent,
1078 total_unicast_bytes_transmitted);
1081 * sum to total_bytes_transmitted all
1082 * unicast/multicast/broadcast
1084 qstats->total_bytes_transmitted_hi =
1085 qstats->total_unicast_bytes_transmitted_hi;
1086 qstats->total_bytes_transmitted_lo =
1087 qstats->total_unicast_bytes_transmitted_lo;
1089 ADD_64(qstats->total_bytes_transmitted_hi,
1090 qstats->total_broadcast_bytes_transmitted_hi,
1091 qstats->total_bytes_transmitted_lo,
1092 qstats->total_broadcast_bytes_transmitted_lo);
1094 ADD_64(qstats->total_bytes_transmitted_hi,
1095 qstats->total_multicast_bytes_transmitted_hi,
1096 qstats->total_bytes_transmitted_lo,
1097 qstats->total_multicast_bytes_transmitted_lo);
1099 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
1100 total_unicast_packets_transmitted);
1101 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1102 total_multicast_packets_transmitted);
1103 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1104 total_broadcast_packets_transmitted);
1106 UPDATE_EXTEND_TSTAT(checksum_discard,
1107 total_packets_received_checksum_discarded);
1108 UPDATE_EXTEND_TSTAT(ttl0_discard,
1109 total_packets_received_ttl0_discarded);
1111 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1112 total_transmitted_dropped_packets_error);
1114 /* TPA aggregations completed */
1115 UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
1116 /* Number of network frames aggregated by TPA */
1117 UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
1118 /* Total number of bytes in completed TPA aggregations */
1119 UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
1121 UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
1123 UPDATE_FSTAT_QSTAT(total_bytes_received);
1124 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1125 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1126 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1127 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1128 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1129 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1130 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1131 UPDATE_FSTAT_QSTAT(valid_bytes_received);
1134 ADD_64(estats->total_bytes_received_hi,
1135 estats->rx_stat_ifhcinbadoctets_hi,
1136 estats->total_bytes_received_lo,
1137 estats->rx_stat_ifhcinbadoctets_lo);
1139 ADD_64_LE(estats->total_bytes_received_hi,
1140 tfunc->rcv_error_bytes.hi,
1141 estats->total_bytes_received_lo,
1142 tfunc->rcv_error_bytes.lo);
1144 ADD_64_LE(estats->error_bytes_received_hi,
1145 tfunc->rcv_error_bytes.hi,
1146 estats->error_bytes_received_lo,
1147 tfunc->rcv_error_bytes.lo);
1149 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1151 ADD_64(estats->error_bytes_received_hi,
1152 estats->rx_stat_ifhcinbadoctets_hi,
1153 estats->error_bytes_received_lo,
1154 estats->rx_stat_ifhcinbadoctets_lo);
1157 struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1158 UPDATE_FW_STAT(mac_filter_discard);
1159 UPDATE_FW_STAT(mf_tag_discard);
1160 UPDATE_FW_STAT(brb_truncate_discard);
1161 UPDATE_FW_STAT(mac_discard);
1164 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1166 sc->stats_pending = 0;
1172 bxe_net_stats_update(struct bxe_softc *sc)
/*
 * Publish the driver's accumulated ethernet statistics to the stack.
 *
 * Collapses each 64-bit hi/lo counter pair in sc->eth_stats with
 * bxe_hilo() and stores the results into the ifnet if_data fields
 * (packets, bytes, errors, drops, collisions) that userland reads.
 * NOTE(review): on 32-bit longs bxe_hilo() returns only the low word.
 */
1174 struct bxe_eth_stats *estats = &sc->eth_stats;
1175 struct ifnet *ifnet = sc->ifnet;
/* input packets = unicast + multicast + broadcast received */
1179 ifnet->if_data.ifi_ipackets =
1180 bxe_hilo(&estats->total_unicast_packets_received_hi) +
1181 bxe_hilo(&estats->total_multicast_packets_received_hi) +
1182 bxe_hilo(&estats->total_broadcast_packets_received_hi);
/* output packets = unicast + multicast + broadcast transmitted */
1184 ifnet->if_data.ifi_opackets =
1185 bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
1186 bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
1187 bxe_hilo(&estats->total_broadcast_packets_transmitted_hi);
1189 ifnet->if_data.ifi_ibytes = bxe_hilo(&estats->total_bytes_received_hi);
1191 ifnet->if_data.ifi_obytes = bxe_hilo(&estats->total_bytes_transmitted_hi);
/*
 * Input-queue drops: accumulate the per-queue TSTORM checksum-discard
 * counters (little-endian in shared memory, hence le32toh).
 * NOTE(review): 'tmp' is declared/zeroed on a line elided from this view.
 */
1194 for (i = 0; i < sc->num_queues; i++) {
1195 struct tstorm_per_queue_stats *old_tclient =
1196 &sc->fp[i].old_tclient;
1197 tmp += le32toh(old_tclient->checksum_discard);
1200 ifnet->if_data.ifi_iqdrops = tmp;
/* input errors: runt/oversize frames, BRB drops, FCS/alignment, no-buffer */
1202 ifnet->if_data.ifi_ierrors =
1203 bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1204 bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
1205 bxe_hilo(&estats->brb_drop_hi) +
1206 bxe_hilo(&estats->brb_truncate_hi) +
1207 bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
1208 bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
1209 bxe_hilo(&estats->no_buff_discard_hi);
/* output errors: carrier-sense + internal MAC transmit errors */
1211 ifnet->if_data.ifi_oerrors =
1212 bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
1213 bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1215 ifnet->if_data.ifi_imcasts =
1216 bxe_hilo(&estats->total_multicast_packets_received_hi);
/* collisions: total + late + excessive */
1218 ifnet->if_data.ifi_collisions =
1219 bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
1220 bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1221 bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1225 bxe_drv_stats_update(struct bxe_softc *sc)
/*
 * Fold the driver-maintained (software) per-queue counters from each
 * fastpath's eth_q_stats into the device-wide sc->eth_stats via the
 * UPDATE_ESTAT_QSTAT macro.
 * NOTE(review): the exact folding (including the *_old handling implied
 * by qstats_old) is defined by UPDATE_ESTAT_QSTAT in bxe_stats.h --
 * confirm the semantics there; this file only shows the call sites.
 */
1227 struct bxe_eth_stats *estats = &sc->eth_stats;
1230 for (i = 0; i < sc->num_queues; i++) {
1231 struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1232 struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
/* RX path software counters */
1234 UPDATE_ESTAT_QSTAT(rx_calls);
1235 UPDATE_ESTAT_QSTAT(rx_pkts);
1236 UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
1237 UPDATE_ESTAT_QSTAT(rx_soft_errors);
1238 UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1239 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1240 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1241 UPDATE_ESTAT_QSTAT(rx_budget_reached);
/* TX path software counters */
1242 UPDATE_ESTAT_QSTAT(tx_pkts);
1243 UPDATE_ESTAT_QSTAT(tx_soft_errors);
1244 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1245 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1246 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1247 UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
1248 UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
1249 UPDATE_ESTAT_QSTAT(tx_encap_failures);
1250 UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1251 UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1252 UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1253 UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1254 UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1255 UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
1256 //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
1257 //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
1258 UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1259 UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1260 UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1262 /* mbuf driver statistics */
1263 UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1264 UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1265 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1266 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1267 UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
1268 UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
1269 UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
1270 UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);
1272 /* track the number of allocated mbufs */
1273 UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1274 UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
1275 UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
1276 UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
1281 bxe_edebug_stats_stopped(struct bxe_softc *sc)
/*
 * Check whether the firmware debug interface has asked the driver to
 * stop statistics collection: shmem2 field edebug_driver_if[1] carries
 * the DISABLE_STAT opcode.
 * NOTE(review): the return statements of this predicate are on lines
 * elided from this view.
 */
1285 if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1286 val = SHMEM2_RD(sc, edebug_driver_if[1]);
1288 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1297 bxe_stats_update(struct bxe_softc *sc)
/*
 * Periodic statistics refresh (the STATS_EVENT_UPDATE action while the
 * state machine is ENABLED).  PF path: verify the previous DMAE
 * completion, pull HW stats, then firmware (storm) stats -- panicking
 * if the firmware fails to deliver fresh storm counters for several
 * consecutive polls.  VF path: storm stats only.  Finally refresh the
 * ifnet/driver views and post the next HW + storm stats requests.
 */
1299 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* honor a firmware-debug request to stop statistics collection */
1301 if (bxe_edebug_stats_stopped(sc)) {
/* the previous DMAE transfer must have completed before re-reading */
1306 if (*stats_comp != DMAE_COMP_VAL) {
1311 bxe_hw_stats_update(sc);
1314 if (bxe_storm_stats_update(sc)) {
/* allow up to 3 missed storm updates before declaring the HW wedged */
1315 if (sc->stats_pending++ == 3) {
1316 bxe_panic(sc, ("storm stats not updated for 3 times\n"));
1322 * VF doesn't collect HW statistics, and doesn't get completions,
1323 * performs only update.
1325 bxe_storm_stats_update(sc);
1328 bxe_net_stats_update(sc);
1329 bxe_drv_stats_update(sc);
/* kick off the next collection cycle */
1336 bxe_hw_stats_post(sc);
1337 bxe_storm_stats_post(sc);
1341 bxe_port_stats_stop(struct bxe_softc *sc)
/*
 * Queue the final DMAE transfers that push the driver's port and
 * function statistics back to the management firmware areas (port_stx /
 * func_stx) before statistics collection stops.  The commands are built
 * in the slowpath dmae[] array; the caller posts them.
 * NOTE(review): the 'opcode' local declaration and several if/else
 * lines are elided from this view.
 */
1343 struct dmae_command *dmae;
1345 int loader_idx = PMF_DMAE_C(sc);
1346 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
1348 sc->executer_idx = 0;
/* PCI -> GRC copy; completion type is added per-command below */
1350 opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
1352 if (sc->port.port_stx) {
1353 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
/* GRC completion chains to the next command; PCI completes to stats_comp */
1356 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1358 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
/* copy host_port_stats into the MCP's port_stx area (GRC addrs in dwords) */
1361 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
1362 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
1363 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1364 dmae->dst_addr_hi = 0;
1365 dmae->len = bxe_get_port_stats_dma_len(sc);
1367 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1368 dmae->comp_addr_hi = 0;
1371 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1372 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1373 dmae->comp_val = DMAE_COMP_VAL;
/* function statistics -> func_stx, completing to the PCI stats_comp word */
1380 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
1381 dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1382 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
1383 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
1384 dmae->dst_addr_lo = (sc->func_stx >> 2);
1385 dmae->dst_addr_hi = 0;
1386 dmae->len = (sizeof(struct host_func_stats) >> 2);
1387 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1388 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1389 dmae->comp_val = DMAE_COMP_VAL;
1396 bxe_stats_stop(struct bxe_softc *sc)
/*
 * STOP action of the statistics state machine: take one last snapshot
 * of the HW and storm counters ('update' records whether either
 * succeeded), refresh the ifnet view, and -- on the PMF -- queue the
 * final port-stats DMAE and post it.
 * NOTE(review): the conditionals guarding these calls are on lines
 * elided from this view.
 */
1398 uint8_t update = FALSE;
1403 update = bxe_hw_stats_update(sc) == 0;
1406 update |= bxe_storm_stats_update(sc) == 0;
1409 bxe_net_stats_update(sc);
1412 bxe_port_stats_stop(sc);
1415 bxe_hw_stats_post(sc);
/* no-op state-machine action, used for state/event slots requiring no work */
1421 bxe_stats_do_nothing(struct bxe_softc *sc)
/*
 * Statistics state machine.  Indexed by [current state][event]; each
 * entry names the handler to invoke and the state to enter afterwards.
 * Rows: STATS_STATE_DISABLED, STATS_STATE_ENABLED.
 * Columns: PMF, LINK_UP, UPDATE, STOP events.
 */
1426 static const struct {
1427 void (*action)(struct bxe_softc *sc);
1428 enum bxe_stats_state next_state;
1429 } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1431 /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
1432 /* LINK_UP */ { bxe_stats_start, STATS_STATE_ENABLED },
1433 /* UPDATE */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
1434 /* STOP */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
1437 /* ENABLED PMF */ { bxe_stats_pmf_start, STATS_STATE_ENABLED },
1438 /* LINK_UP */ { bxe_stats_restart, STATS_STATE_ENABLED },
1439 /* UPDATE */ { bxe_stats_update, STATS_STATE_ENABLED },
1440 /* STOP */ { bxe_stats_stop, STATS_STATE_DISABLED }
/*
 * Public entry point: drive the statistics state machine.  Under the
 * stats lock, latch the current state and advance to the next one; run
 * the corresponding action after dropping the lock so the (potentially
 * slow) handler does not hold it.
 * NOTE(review): the matching BXE_STATS_LOCK() call is on a line elided
 * from this view.
 */
1444 void bxe_stats_handle(struct bxe_softc *sc,
1445 enum bxe_stats_event event)
1447 enum bxe_stats_state state;
/* nothing to do once the driver has panicked */
1449 if (__predict_false(sc->panic)) {
1454 state = sc->stats_state;
1455 sc->stats_state = bxe_stats_stm[state][event].next_state;
1456 BXE_STATS_UNLOCK(sc);
1458 bxe_stats_stm[state][event].action(sc);
/* UPDATE fires on every tick; don't spam the debug log for it */
1460 if (event != STATS_EVENT_UPDATE) {
1461 BLOGD(sc, DBG_STATS,
1462 "state %d -> event %d -> state %d\n",
1463 state, event, sc->stats_state);
1468 bxe_port_stats_base_init(struct bxe_softc *sc)
/*
 * One-shot DMAE that seeds the management firmware's port statistics
 * area (port_stx) from the driver's host port_stats buffer.  Only
 * meaningful on the PMF with a port_stx address advertised by the MCP.
 */
1470 struct dmae_command *dmae;
1471 uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* sanity: only the PMF with a valid port_stx should ever get here */
1474 if (!sc->port.pmf || !sc->port.port_stx) {
1475 BLOGE(sc, "BUG!\n");
1479 sc->executer_idx = 0;
/* single PCI -> GRC copy of the full port stats block (dword units) */
1481 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
1482 dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1483 TRUE, DMAE_COMP_PCI);
1484 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
1485 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
1486 dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1487 dmae->dst_addr_hi = 0;
1488 dmae->len = bxe_get_port_stats_dma_len(sc);
1489 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
1490 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
1491 dmae->comp_val = DMAE_COMP_VAL;
/* issue the transfer and wait for completion */
1494 bxe_hw_stats_post(sc);
1499 * This function will prepare the statistics ramrod data the way
1500 * we will only have to increment the statistics counter and
1501 * send the ramrod each time we have to.
1504 bxe_prep_fw_stats_req(struct bxe_softc *sc)
/*
 * Build the firmware statistics query template once: header (command
 * count + DMA address of the storm completion counters) followed by one
 * query entry each for port, PF, optionally FCoE, and every fastpath
 * queue.  Later updates only bump drv_stats_counter and re-send.
 */
1507 int first_queue_query_index;
1508 struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1509 bus_addr_t cur_data_offset;
1510 struct stats_query_entry *cur_query_entry;
1512 stats_hdr->cmd_num = sc->fw_stats_num;
1513 stats_hdr->drv_stats_counter = 0;
1516 * The storm_counters struct contains the counters of completed
1517 * statistics requests per storm which are incremented by FW
1518 * each time it completes handling a statistics ramrod. We will
1519 * check these counters in the timer handler and discard a
1520 * (statistics) ramrod completion.
1522 cur_data_offset = (sc->fw_stats_data_mapping +
1523 offsetof(struct bxe_fw_stats_data, storm_counters));
1525 stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1526 stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1529 * Prepare the first stats ramrod (will be completed with
1530 * the counters equal to zero) - init counters to something different.
1532 memset(&sc->fw_stats_data->storm_counters, 0xff,
1533 sizeof(struct stats_counter));
1535 /**** Port FW statistics data ****/
1536 cur_data_offset = (sc->fw_stats_data_mapping +
1537 offsetof(struct bxe_fw_stats_data, port));
1539 cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];
1541 cur_query_entry->kind = STATS_TYPE_PORT;
1542 /* For port query index is a DONT CARE */
1543 cur_query_entry->index = SC_PORT(sc);
1544 /* For port query funcID is a DONT CARE */
1545 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1546 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1547 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1549 /**** PF FW statistics data ****/
1550 cur_data_offset = (sc->fw_stats_data_mapping +
1551 offsetof(struct bxe_fw_stats_data, pf));
1553 cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];
1555 cur_query_entry->kind = STATS_TYPE_PF;
1556 /* For PF query index is a DONT CARE */
1557 cur_query_entry->index = SC_PORT(sc);
1558 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1559 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1560 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1563 /**** FCoE FW statistics data ****/
1565 cur_data_offset = (sc->fw_stats_data_mapping +
1566 offsetof(struct bxe_fw_stats_data, fcoe));
1568 cur_query_entry = &sc->fw_stats_req->query[BXE_FCOE_QUERY_IDX];
1570 cur_query_entry->kind = STATS_TYPE_FCOE;
1571 /* For FCoE query index is a DONT CARE */
1572 cur_query_entry->index = SC_PORT(sc);
/* NOTE(review): cpu_to_le16 is a Linux-ism; htole16 is used everywhere else */
1573 cur_query_entry->funcID = cpu_to_le16(SC_FUNC(sc));
1574 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1575 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1579 /**** Clients' queries ****/
1580 cur_data_offset = (sc->fw_stats_data_mapping +
1581 offsetof(struct bxe_fw_stats_data, queue_stats));
1584 * First queue query index depends whether FCoE offloaded request will
1585 * be included in the ramrod
1589 first_queue_query_index = BXE_FIRST_QUEUE_QUERY_IDX;
1592 first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);
/* one STATS_TYPE_QUEUE entry per active fastpath queue */
1594 for (i = 0; i < sc->num_queues; i++) {
1596 &sc->fw_stats_req->query[first_queue_query_index + i];
1598 cur_query_entry->kind = STATS_TYPE_QUEUE;
1599 cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
1600 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1601 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1602 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1604 cur_data_offset += sizeof(struct per_queue_stats);
1608 /* add FCoE queue query if needed */
1611 &sc->fw_stats_req->query[first_queue_query_index + i];
1613 cur_query_entry->kind = STATS_TYPE_QUEUE;
1614 cur_query_entry->index = bxe_stats_id(&sc->fp[FCOE_IDX(sc)]);
1615 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1616 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1617 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1623 bxe_stats_init(struct bxe_softc *sc)
/*
 * Attach/init-time statistics setup: discover the MCP port/function
 * statistics mailboxes, snapshot baseline NIG counters, zero the
 * per-queue and device-wide software stats (first init only), build the
 * firmware statistics ramrod template, and clear the ifnet counters.
 */
1625 int /*abs*/port = SC_PORT(sc);
1626 int mb_idx = SC_FW_MB_IDX(sc);
1629 sc->stats_pending = 0;
1630 sc->executer_idx = 0;
1631 sc->stats_counter = 0;
1633 /* port and func stats for management */
1634 if (!BXE_NOMCP(sc)) {
1635 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1636 sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
1638 sc->port.port_stx = 0;
1642 BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1643 sc->port.port_stx, sc->func_stx);
1645 /* pmf should retrieve port statistics from SP on a non-init */
1646 if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1647 bxe_stats_handle(sc, STATS_EVENT_PMF);
/* snapshot baseline NIG counters so later deltas can be computed */
1652 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1653 sc->port.old_nig_stats.brb_discard =
1654 REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1655 sc->port.old_nig_stats.brb_truncate =
1656 REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
/* the egress MAC packet counters only exist on pre-E3 chips */
1657 if (!CHIP_IS_E3(sc)) {
1658 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1659 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1660 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1661 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1664 /* function stats */
1665 for (i = 0; i < sc->num_queues; i++) {
1666 memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1667 memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1668 memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
/* on the very first init also clear the per-queue software stats */
1669 if (sc->stats_init) {
1670 memset(&sc->fp[i].eth_q_stats, 0,
1671 sizeof(sc->fp[i].eth_q_stats));
1672 memset(&sc->fp[i].eth_q_stats_old, 0,
1673 sizeof(sc->fp[i].eth_q_stats_old));
1677 /* prepare statistics ramrod data */
1678 bxe_prep_fw_stats_req(sc);
/* reset the counters userland sees via the ifnet */
1680 sc->ifnet->if_data.ifi_ipackets = 0;
1681 sc->ifnet->if_data.ifi_opackets = 0;
1682 sc->ifnet->if_data.ifi_ibytes = 0;
1683 sc->ifnet->if_data.ifi_obytes = 0;
1684 sc->ifnet->if_data.ifi_ierrors = 0;
1685 sc->ifnet->if_data.ifi_oerrors = 0;
1686 sc->ifnet->if_data.ifi_imcasts = 0;
1687 sc->ifnet->if_data.ifi_collisions = 0;
/* first init: zero every saved/accumulated software stats structure */
1689 if (sc->stats_init) {
1690 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1691 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1692 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1693 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1694 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1696 /* Clean SP from previous statistics */
1698 memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1699 bxe_func_stats_init(sc);
1700 bxe_hw_stats_post(sc);
1705 sc->stats_state = STATS_STATE_DISABLED;
1707 if (sc->port.pmf && sc->port.port_stx) {
1708 bxe_port_stats_base_init(sc);
1711 /* mark the end of statistics initialization */
1712 sc->stats_init = FALSE;
1716 bxe_save_statistics(struct bxe_softc *sc)
/*
 * Snapshot counters that must survive a reload: the per-queue 64-bit
 * byte counters (into eth_q_stats_old), the net drop count, and -- when
 * a port_stx area exists -- the firmware port statistics, so they can
 * be folded back into the freshly-zeroed counters after restart.
 */
1720 /* save queue statistics */
1721 for (i = 0; i < sc->num_queues; i++) {
1722 struct bxe_fastpath *fp = &sc->fp[i];
1723 struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
1724 struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1726 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1727 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1728 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1729 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1730 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1731 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1732 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1733 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1734 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1735 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1736 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1737 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1738 UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
1739 UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
1742 /* save net_device_stats statistics */
1743 sc->net_stats_old.rx_dropped = sc->ifnet->if_data.ifi_iqdrops;
1745 /* store port firmware statistics */
1747 struct bxe_eth_stats *estats = &sc->eth_stats;
1748 struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1749 struct host_port_stats *pstats = BXE_SP(sc, port_stats);
/* PFC frame counters are copied verbatim (already hi/lo split) */
1751 fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1752 fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1753 fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1754 fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
1757 UPDATE_FW_STAT_OLD(mac_filter_discard);
1758 UPDATE_FW_STAT_OLD(mf_tag_discard);
1759 UPDATE_FW_STAT_OLD(brb_truncate_discard);
1760 UPDATE_FW_STAT_OLD(mac_discard);
1766 bxe_afex_collect_stats(struct bxe_softc *sc,
1767 void *void_afex_stats,
1768 uint32_t stats_type)
1771 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1772 struct bxe_eth_stats *estats = &sc->eth_stats;
1774 struct per_queue_stats *fcoe_q_stats =
1775 &sc->fw_stats_data->queue_stats[FCOE_IDX(sc)];
1777 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1778 &fcoe_q_stats->tstorm_queue_statistics;
1780 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1781 &fcoe_q_stats->ustorm_queue_statistics;
1783 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1784 &fcoe_q_stats->xstorm_queue_statistics;
1786 struct fcoe_statistics_params *fw_fcoe_stat =
1787 &sc->fw_stats_data->fcoe;
1790 memset(afex_stats, 0, sizeof(struct afex_stats));
1792 for (i = 0; i < sc->num_queues; i++) {
1793 struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1795 ADD_64(afex_stats->rx_unicast_bytes_hi,
1796 qstats->total_unicast_bytes_received_hi,
1797 afex_stats->rx_unicast_bytes_lo,
1798 qstats->total_unicast_bytes_received_lo);
1800 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1801 qstats->total_broadcast_bytes_received_hi,
1802 afex_stats->rx_broadcast_bytes_lo,
1803 qstats->total_broadcast_bytes_received_lo);
1805 ADD_64(afex_stats->rx_multicast_bytes_hi,
1806 qstats->total_multicast_bytes_received_hi,
1807 afex_stats->rx_multicast_bytes_lo,
1808 qstats->total_multicast_bytes_received_lo);
1810 ADD_64(afex_stats->rx_unicast_frames_hi,
1811 qstats->total_unicast_packets_received_hi,
1812 afex_stats->rx_unicast_frames_lo,
1813 qstats->total_unicast_packets_received_lo);
1815 ADD_64(afex_stats->rx_broadcast_frames_hi,
1816 qstats->total_broadcast_packets_received_hi,
1817 afex_stats->rx_broadcast_frames_lo,
1818 qstats->total_broadcast_packets_received_lo);
1820 ADD_64(afex_stats->rx_multicast_frames_hi,
1821 qstats->total_multicast_packets_received_hi,
1822 afex_stats->rx_multicast_frames_lo,
1823 qstats->total_multicast_packets_received_lo);
1826 * sum to rx_frames_discarded all discarded
1827 * packets due to size, ttl0 and checksum
1829 ADD_64(afex_stats->rx_frames_discarded_hi,
1830 qstats->total_packets_received_checksum_discarded_hi,
1831 afex_stats->rx_frames_discarded_lo,
1832 qstats->total_packets_received_checksum_discarded_lo);
1834 ADD_64(afex_stats->rx_frames_discarded_hi,
1835 qstats->total_packets_received_ttl0_discarded_hi,
1836 afex_stats->rx_frames_discarded_lo,
1837 qstats->total_packets_received_ttl0_discarded_lo);
1839 ADD_64(afex_stats->rx_frames_discarded_hi,
1840 qstats->etherstatsoverrsizepkts_hi,
1841 afex_stats->rx_frames_discarded_lo,
1842 qstats->etherstatsoverrsizepkts_lo);
1844 ADD_64(afex_stats->rx_frames_dropped_hi,
1845 qstats->no_buff_discard_hi,
1846 afex_stats->rx_frames_dropped_lo,
1847 qstats->no_buff_discard_lo);
1849 ADD_64(afex_stats->tx_unicast_bytes_hi,
1850 qstats->total_unicast_bytes_transmitted_hi,
1851 afex_stats->tx_unicast_bytes_lo,
1852 qstats->total_unicast_bytes_transmitted_lo);
1854 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1855 qstats->total_broadcast_bytes_transmitted_hi,
1856 afex_stats->tx_broadcast_bytes_lo,
1857 qstats->total_broadcast_bytes_transmitted_lo);
1859 ADD_64(afex_stats->tx_multicast_bytes_hi,
1860 qstats->total_multicast_bytes_transmitted_hi,
1861 afex_stats->tx_multicast_bytes_lo,
1862 qstats->total_multicast_bytes_transmitted_lo);
1864 ADD_64(afex_stats->tx_unicast_frames_hi,
1865 qstats->total_unicast_packets_transmitted_hi,
1866 afex_stats->tx_unicast_frames_lo,
1867 qstats->total_unicast_packets_transmitted_lo);
1869 ADD_64(afex_stats->tx_broadcast_frames_hi,
1870 qstats->total_broadcast_packets_transmitted_hi,
1871 afex_stats->tx_broadcast_frames_lo,
1872 qstats->total_broadcast_packets_transmitted_lo);
1874 ADD_64(afex_stats->tx_multicast_frames_hi,
1875 qstats->total_multicast_packets_transmitted_hi,
1876 afex_stats->tx_multicast_frames_lo,
1877 qstats->total_multicast_packets_transmitted_lo);
1879 ADD_64(afex_stats->tx_frames_dropped_hi,
1880 qstats->total_transmitted_dropped_packets_error_hi,
1881 afex_stats->tx_frames_dropped_lo,
1882 qstats->total_transmitted_dropped_packets_error_lo);
1887 * Now add FCoE statistics which are collected separately
1888 * (both offloaded and non offloaded)
1891 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1893 afex_stats->rx_unicast_bytes_lo,
1894 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1896 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1897 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1898 afex_stats->rx_unicast_bytes_lo,
1899 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1901 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1902 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1903 afex_stats->rx_broadcast_bytes_lo,
1904 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1906 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1907 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1908 afex_stats->rx_multicast_bytes_lo,
1909 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1911 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1913 afex_stats->rx_unicast_frames_lo,
1914 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1916 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1918 afex_stats->rx_unicast_frames_lo,
1919 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1921 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1923 afex_stats->rx_broadcast_frames_lo,
1924 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1926 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1928 afex_stats->rx_multicast_frames_lo,
1929 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1931 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1933 afex_stats->rx_frames_discarded_lo,
1934 fcoe_q_tstorm_stats->checksum_discard);
1936 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1938 afex_stats->rx_frames_discarded_lo,
1939 fcoe_q_tstorm_stats->pkts_too_big_discard);
1941 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1943 afex_stats->rx_frames_discarded_lo,
1944 fcoe_q_tstorm_stats->ttl0_discard);
1946 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1948 afex_stats->rx_frames_dropped_lo,
1949 fcoe_q_tstorm_stats->no_buff_discard);
1951 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1953 afex_stats->rx_frames_dropped_lo,
1954 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1956 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1958 afex_stats->rx_frames_dropped_lo,
1959 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1961 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1963 afex_stats->rx_frames_dropped_lo,
1964 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1966 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1968 afex_stats->rx_frames_dropped_lo,
1969 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1971 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1973 afex_stats->rx_frames_dropped_lo,
1974 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1976 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1978 afex_stats->tx_unicast_bytes_lo,
1979 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1981 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1982 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1983 afex_stats->tx_unicast_bytes_lo,
1984 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1986 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1987 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1988 afex_stats->tx_broadcast_bytes_lo,
1989 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1991 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1992 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1993 afex_stats->tx_multicast_bytes_lo,
1994 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1996 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1998 afex_stats->tx_unicast_frames_lo,
1999 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
2001 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
2003 afex_stats->tx_unicast_frames_lo,
2004 fcoe_q_xstorm_stats->ucast_pkts_sent);
2006 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
2008 afex_stats->tx_broadcast_frames_lo,
2009 fcoe_q_xstorm_stats->bcast_pkts_sent);
2011 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
2013 afex_stats->tx_multicast_frames_lo,
2014 fcoe_q_xstorm_stats->mcast_pkts_sent);
2016 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
2018 afex_stats->tx_frames_dropped_lo,
2019 fcoe_q_xstorm_stats->error_drop_pkts);
2024 * If port stats are requested, add them to the PMF
2025 * stats, as anyway they will be accumulated by the
2026 * MCP before sent to the switch
2028 if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
2029 ADD_64(afex_stats->rx_frames_dropped_hi,
2031 afex_stats->rx_frames_dropped_lo,
2032 estats->mac_filter_discard);
2033 ADD_64(afex_stats->rx_frames_dropped_hi,
2035 afex_stats->rx_frames_dropped_lo,
2036 estats->brb_truncate_discard);
2037 ADD_64(afex_stats->rx_frames_discarded_hi,
2039 afex_stats->rx_frames_discarded_lo,
2040 estats->mac_discard);