1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Chelsio Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <dev/cxgb/common/cxgb_common.h>
38 #include <dev/cxgb/common/cxgb_regs.h>
/*
 * # of exact address filters.  The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
44 #define EXACT_ADDR_FILTERS 8
46 static inline int macidx(const struct cmac *mac)
48 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
51 static void xaui_serdes_reset(struct cmac *mac)
53 static const unsigned int clear[] = {
54 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
55 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
59 adapter_t *adap = mac->adapter;
60 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
62 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
63 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
64 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
65 F_RESETPLL23 | F_RESETPLL01);
66 (void)t3_read_reg(adap, ctrl);
69 for (i = 0; i < ARRAY_SIZE(clear); i++) {
70 t3_set_reg_field(adap, ctrl, clear[i], 0);
75 void t3b_pcs_reset(struct cmac *mac)
77 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
80 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
84 int t3_mac_reset(struct cmac *mac)
86 static struct addr_val_pair mac_reset_avp[] = {
89 { A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
90 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
91 { A_XGM_RX_HASH_LOW, 0 },
92 { A_XGM_RX_HASH_HIGH, 0 },
93 { A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
94 { A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
95 { A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
96 { A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
97 { A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
98 { A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
99 { A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
100 { A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
101 { A_XGM_STAT_CTRL, F_CLRSTATS }
104 adapter_t *adap = mac->adapter;
105 unsigned int oft = mac->offset;
107 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
108 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
110 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
111 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
112 F_RXSTRFRWRD | F_DISERRFRAMES,
113 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
115 if (uses_xaui(adap)) {
116 if (adap->params.rev == 0) {
117 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
118 F_RXENABLE | F_TXENABLE);
119 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
120 F_CMULOCK, 1, 5, 2)) {
122 "MAC %d XAUI SERDES CMU lock failed\n",
126 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
129 xaui_serdes_reset(mac);
135 else if (uses_xaui(adap))
136 val |= F_PCS_RESET_ | F_XG2G_RESET_;
138 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
139 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
140 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
141 if ((val & F_PCS_RESET_) && adap->params.rev) {
146 memset(&mac->stats, 0, sizeof(mac->stats));
150 static int t3b2_mac_reset(struct cmac *mac)
153 adapter_t *adap = mac->adapter;
154 unsigned int oft = mac->offset;
157 /* Stop egress traffic to xgm*/
159 t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
161 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
164 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
165 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
169 /* Check for xgm Rx fifo empty */
170 if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
171 0x80000000, 1, 5, 2)) {
172 CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
177 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); /*MAC in reset*/
178 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
183 else if (uses_xaui(adap))
184 val |= F_PCS_RESET_ | F_XG2G_RESET_;
186 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
187 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
188 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
189 if ((val & F_PCS_RESET_) && adap->params.rev) {
193 t3_write_reg(adap, A_XGM_RX_CFG + oft,
194 F_DISPAUSEFRAMES | F_EN1536BFRAMES |
195 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST );
197 /*Resume egress traffic to xgm*/
199 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
201 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
207 * Set the exact match register 'idx' to recognize the given Ethernet address.
209 static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
211 u32 addr_lo, addr_hi;
212 unsigned int oft = mac->offset + idx * 8;
214 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
215 addr_hi = (addr[5] << 8) | addr[4];
217 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
218 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
221 /* Set one of the station's unicast MAC addresses. */
222 int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
224 if (idx >= mac->nucast)
226 set_addr_filter(mac, idx, addr);
231 * Specify the number of exact address filters that should be reserved for
232 * unicast addresses. Caller should reload the unicast and multicast addresses
233 * after calling this.
235 int t3_mac_set_num_ucast(struct cmac *mac, int n)
237 if (n > EXACT_ADDR_FILTERS)
243 /* Calculate the RX hash filter index of an Ethernet address */
244 static int hash_hw_addr(const u8 *addr)
246 int hash = 0, octet, bit, i = 0, c;
248 for (octet = 0; octet < 6; ++octet)
249 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
250 hash ^= (c & 1) << i;
257 int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
259 u32 val, hash_lo, hash_hi;
260 adapter_t *adap = mac->adapter;
261 unsigned int oft = mac->offset;
263 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
264 if (promisc_rx_mode(rm))
265 val |= F_COPYALLFRAMES;
266 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
268 if (allmulti_rx_mode(rm))
269 hash_lo = hash_hi = 0xffffffff;
272 int exact_addr_idx = mac->nucast;
274 hash_lo = hash_hi = 0;
275 while ((addr = t3_get_next_mcaddr(rm)))
276 if (exact_addr_idx < EXACT_ADDR_FILTERS)
277 set_addr_filter(mac, exact_addr_idx++, addr);
279 int hash = hash_hw_addr(addr);
282 hash_lo |= (1 << hash);
284 hash_hi |= (1 << (hash - 32));
288 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
289 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
293 int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
296 unsigned int thres, v;
297 adapter_t *adap = mac->adapter;
300 * MAX_FRAME_SIZE inludes header + FCS, mtu doesn't. The HW max
301 * packet size register includes header, but not FCS.
304 if (mtu > MAX_FRAME_SIZE - 4)
306 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
309 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
310 * HWM only if flow-control is enabled.
312 hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
313 MAC_RXFIFO_SIZE * 38 / 100);
314 hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
315 lwm = min(3 * (int) mtu, MAC_RXFIFO_SIZE /4);
316 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
317 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
318 v |= V_RXFIFOPAUSELWM(lwm / 8);
319 if (G_RXFIFOPAUSEHWM(v))
320 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
321 V_RXFIFOPAUSEHWM(hwm / 8);
322 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
324 /* Adjust the TX FIFO threshold based on the MTU */
325 thres = (adap->params.vpd.cclk * 1000) / 15625;
326 thres = (thres * mtu) / 1000;
329 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
330 thres = max(thres, 8U); /* need at least 8 */
331 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
332 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
333 V_TXFIFOTHRESH(thres) | V_TXIPG(1));
335 /* Assuming a minimum drain rate of 2.5Gbps...
337 if (adap->params.rev > 0)
338 t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
340 t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
341 MAC_RXFIFO_SIZE * 4 * 8 / 512);
345 int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
348 adapter_t *adap = mac->adapter;
349 unsigned int oft = mac->offset;
351 if (duplex >= 0 && duplex != DUPLEX_FULL)
354 if (speed == SPEED_10)
355 val = V_PORTSPEED(0);
356 else if (speed == SPEED_100)
357 val = V_PORTSPEED(1);
358 else if (speed == SPEED_1000)
359 val = V_PORTSPEED(2);
360 else if (speed == SPEED_10000)
361 val = V_PORTSPEED(3);
365 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
366 V_PORTSPEED(M_PORTSPEED), val);
369 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
370 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
372 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
373 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
375 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
376 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
380 int t3_mac_enable(struct cmac *mac, int which)
382 int idx = macidx(mac);
383 adapter_t *adap = mac->adapter;
384 unsigned int oft = mac->offset;
385 struct mac_stats *s = &mac->stats;
387 if (which & MAC_DIRECTION_TX) {
388 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
389 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
390 t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
391 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
392 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
394 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
395 mac->tx_mcnt = s->tx_frames;
396 mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
398 mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
399 A_XGM_TX_SPI4_SOP_EOP_CNT +
401 mac->rx_mcnt = s->rx_frames;
402 mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
403 A_XGM_RX_SPI4_SOP_EOP_CNT +
408 if (which & MAC_DIRECTION_RX)
409 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
413 int t3_mac_disable(struct cmac *mac, int which)
415 int idx = macidx(mac);
416 adapter_t *adap = mac->adapter;
419 if (which & MAC_DIRECTION_TX) {
420 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
421 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
422 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
423 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
424 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
427 if (which & MAC_DIRECTION_RX) {
428 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
431 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
435 else if (uses_xaui(adap))
436 val |= F_PCS_RESET_ | F_XG2G_RESET_;
438 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
439 t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
444 int t3b2_mac_watchdog_task(struct cmac *mac)
447 unsigned int tx_tcnt, tx_xcnt;
448 adapter_t *adap = mac->adapter;
449 struct mac_stats *s = &mac->stats;
450 unsigned int tx_mcnt = (unsigned int)s->tx_frames;
451 unsigned int rx_mcnt = (unsigned int)s->rx_frames;
452 unsigned int rx_xcnt;
455 tx_xcnt = 1; /* By default tx_xcnt is making progress*/
456 tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt*/
457 rx_xcnt = 1; /* By default rx_xcnt is making progress*/
458 if (tx_mcnt == mac->tx_mcnt) {
459 tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
460 A_XGM_TX_SPI4_SOP_EOP_CNT +
463 t3_write_reg(adap, A_TP_PIO_ADDR,
464 A_TP_TX_DROP_CNT_CH0 + macidx(mac));
465 tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
475 if (((tx_tcnt != mac->tx_tcnt) &&
476 (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
477 ((mac->tx_mcnt == tx_mcnt) &&
478 (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
479 if (mac->toggle_cnt > 4) {
492 if (rx_mcnt != mac->rx_mcnt)
493 rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
494 A_XGM_RX_SPI4_SOP_EOP_CNT +
499 if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) {
505 mac->tx_tcnt = tx_tcnt;
506 mac->tx_xcnt = tx_xcnt;
507 mac->tx_mcnt = s->tx_frames;
508 mac->rx_xcnt = rx_xcnt;
509 mac->rx_mcnt = s->rx_frames;
511 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
512 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
513 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
514 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
516 } else if (status == 2) {
/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics.  Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that.  The byte counters are 45-bit wide, they
 * would overflow in ~7.8 hours.
 */
530 const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
532 #define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
533 #define RMON_UPDATE(mac, name, reg) \
534 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
535 #define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
536 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
537 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
541 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
542 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
543 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
544 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
545 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
546 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
547 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
548 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
549 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
551 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
553 v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
554 if (mac->adapter->params.rev == T3_REV_B2)
556 mac->stats.rx_too_long += v;
558 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
559 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
560 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
561 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
562 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
563 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
564 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
566 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
567 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
568 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
569 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
570 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
571 /* This counts error frames in general (bad FCS, underrun, etc). */
572 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
574 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
575 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
576 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
577 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
578 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
579 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
580 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
582 /* The next stat isn't clear-on-read. */
583 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
584 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
585 lo = (u32)mac->stats.rx_cong_drops;
586 mac->stats.rx_cong_drops += (u64)(v - lo);