1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include <cxgb_include.h>
36 #include <dev/cxgb/cxgb_include.h>
40 #define msleep t3_os_sleep
43 * t3_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
58 int attempts, int delay, u32 *valp)
61 u32 val = t3_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
76 * t3_write_regs - write a bunch of registers
77 * @adapter: the adapter to program
78 * @p: an array of register address/register value pairs
79 * @n: the number of address/value pairs
80 * @offset: register address offset
82 * Takes an array of register address/register value pairs and writes each
83 * value to the corresponding register. Register addresses are adjusted
84 * by the supplied offset.
86 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
90 t3_write_reg(adapter, p->reg_addr + offset, p->val);
96 * t3_set_reg_field - set a register field to a value
97 * @adapter: the adapter to program
98 * @addr: the register address
99 * @mask: specifies the portion of the register to modify
100 * @val: the new value for the register field
102 * Sets a register field specified by the supplied mask to the
105 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
107 u32 v = t3_read_reg(adapter, addr) & ~mask;
109 t3_write_reg(adapter, addr, v | val);
110 (void) t3_read_reg(adapter, addr); /* flush */
114 * t3_read_indirect - read indirectly addressed registers
116 * @addr_reg: register holding the indirect address
117 * @data_reg: register holding the value of the indirect register
118 * @vals: where the read register values are stored
119 * @start_idx: index of first indirect register to read
120 * @nregs: how many indirect registers to read
122 * Reads registers that are accessed indirectly through an address/data
125 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
126 unsigned int data_reg, u32 *vals, unsigned int nregs,
127 unsigned int start_idx)
130 t3_write_reg(adap, addr_reg, start_idx);
131 *vals++ = t3_read_reg(adap, data_reg);
137 * t3_mc7_bd_read - read from MC7 through backdoor accesses
138 * @mc7: identifies MC7 to read from
139 * @start: index of first 64-bit word to read
140 * @n: number of 64-bit words to read
141 * @buf: where to store the read result
143 * Read n 64-bit words from MC7 starting at word start, using backdoor
146 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
149 static int shift[] = { 0, 0, 16, 24 };
150 static int step[] = { 0, 32, 16, 8 };
152 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
153 adapter_t *adap = mc7->adapter;
155 if (start >= size64 || start + n > size64)
158 start *= (8 << mc7->width);
163 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
167 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
169 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
170 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
171 while ((val & F_BUSY) && attempts--)
172 val = t3_read_reg(adap,
173 mc7->offset + A_MC7_BD_OP);
177 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
178 if (mc7->width == 0) {
179 val64 = t3_read_reg(adap,
180 mc7->offset + A_MC7_BD_DATA0);
181 val64 |= (u64)val << 32;
184 val >>= shift[mc7->width];
185 val64 |= (u64)val << (step[mc7->width] * i);
197 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
199 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
200 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
203 if (!(ai->caps & SUPPORTED_10000baseT_Full))
205 t3_write_reg(adap, A_MI1_CFG, val);
208 #define MDIO_ATTEMPTS 20
211 * MI1 read/write operations for direct-addressed PHYs.
213 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
214 int reg_addr, unsigned int *valp)
217 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
223 t3_write_reg(adapter, A_MI1_ADDR, addr);
224 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
225 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
227 *valp = t3_read_reg(adapter, A_MI1_DATA);
228 MDIO_UNLOCK(adapter);
232 static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
233 int reg_addr, unsigned int val)
236 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
242 t3_write_reg(adapter, A_MI1_ADDR, addr);
243 t3_write_reg(adapter, A_MI1_DATA, val);
244 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
245 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
246 MDIO_UNLOCK(adapter);
250 static struct mdio_ops mi1_mdio_ops = {
256 * MI1 read/write operations for indirect-addressed PHYs.
258 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
259 int reg_addr, unsigned int *valp)
262 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
265 t3_write_reg(adapter, A_MI1_ADDR, addr);
266 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
270 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
271 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
274 *valp = t3_read_reg(adapter, A_MI1_DATA);
276 MDIO_UNLOCK(adapter);
280 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
281 int reg_addr, unsigned int val)
284 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
287 t3_write_reg(adapter, A_MI1_ADDR, addr);
288 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
289 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
290 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
292 t3_write_reg(adapter, A_MI1_DATA, val);
293 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
294 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
297 MDIO_UNLOCK(adapter);
301 static struct mdio_ops mi1_mdio_ext_ops = {
307 * t3_mdio_change_bits - modify the value of a PHY register
308 * @phy: the PHY to operate on
309 * @mmd: the device address
310 * @reg: the register address
311 * @clear: what part of the register value to mask off
312 * @set: what part of the register value to set
314 * Changes the value of a PHY register by applying a mask to its current
315 * value and ORing the result with a new value.
317 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
323 ret = mdio_read(phy, mmd, reg, &val);
326 ret = mdio_write(phy, mmd, reg, val | set);
332 * t3_phy_reset - reset a PHY block
333 * @phy: the PHY to operate on
334 * @mmd: the device address of the PHY block to reset
335 * @wait: how long to wait for the reset to complete in 1ms increments
337 * Resets a PHY block and optionally waits for the reset to complete.
338 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
341 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
346 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
351 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 } while (ctl && --wait);
363 * t3_phy_advertise - set the PHY advertisement registers for autoneg
364 * @phy: the PHY to operate on
365 * @advert: bitmap of capabilities the PHY should advertise
367 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
368 * requested capabilities.
370 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
373 unsigned int val = 0;
375 err = mdio_read(phy, 0, MII_CTRL1000, &val);
379 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
380 if (advert & ADVERTISED_1000baseT_Half)
381 val |= ADVERTISE_1000HALF;
382 if (advert & ADVERTISED_1000baseT_Full)
383 val |= ADVERTISE_1000FULL;
385 err = mdio_write(phy, 0, MII_CTRL1000, val);
390 if (advert & ADVERTISED_10baseT_Half)
391 val |= ADVERTISE_10HALF;
392 if (advert & ADVERTISED_10baseT_Full)
393 val |= ADVERTISE_10FULL;
394 if (advert & ADVERTISED_100baseT_Half)
395 val |= ADVERTISE_100HALF;
396 if (advert & ADVERTISED_100baseT_Full)
397 val |= ADVERTISE_100FULL;
398 if (advert & ADVERTISED_Pause)
399 val |= ADVERTISE_PAUSE_CAP;
400 if (advert & ADVERTISED_Asym_Pause)
401 val |= ADVERTISE_PAUSE_ASYM;
402 return mdio_write(phy, 0, MII_ADVERTISE, val);
406 * t3_phy_advertise_fiber - set fiber PHY advertisement register
407 * @phy: the PHY to operate on
408 * @advert: bitmap of capabilities the PHY should advertise
410 * Sets a fiber PHY's advertisement register to advertise the
411 * requested capabilities.
413 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
415 unsigned int val = 0;
417 if (advert & ADVERTISED_1000baseT_Half)
418 val |= ADVERTISE_1000XHALF;
419 if (advert & ADVERTISED_1000baseT_Full)
420 val |= ADVERTISE_1000XFULL;
421 if (advert & ADVERTISED_Pause)
422 val |= ADVERTISE_1000XPAUSE;
423 if (advert & ADVERTISED_Asym_Pause)
424 val |= ADVERTISE_1000XPSE_ASYM;
425 return mdio_write(phy, 0, MII_ADVERTISE, val);
429 * t3_set_phy_speed_duplex - force PHY speed and duplex
430 * @phy: the PHY to operate on
431 * @speed: requested PHY speed
432 * @duplex: requested PHY duplex
434 * Force a 10/100/1000 PHY's speed and duplex. This also disables
435 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
437 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
442 err = mdio_read(phy, 0, MII_BMCR, &ctl);
447 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
448 if (speed == SPEED_100)
449 ctl |= BMCR_SPEED100;
450 else if (speed == SPEED_1000)
451 ctl |= BMCR_SPEED1000;
454 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
455 if (duplex == DUPLEX_FULL)
456 ctl |= BMCR_FULLDPLX;
458 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
459 ctl |= BMCR_ANENABLE;
460 return mdio_write(phy, 0, MII_BMCR, ctl);
463 int t3_phy_lasi_intr_enable(struct cphy *phy)
465 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
468 int t3_phy_lasi_intr_disable(struct cphy *phy)
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
473 int t3_phy_lasi_intr_clear(struct cphy *phy)
477 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
480 int t3_phy_lasi_intr_handler(struct cphy *phy)
483 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
487 return (status & 1) ? cphy_cause_link_change : 0;
490 static struct adapter_info t3_adap_info[] = {
492 F_GPIO2_OEN | F_GPIO4_OEN |
493 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
494 &mi1_mdio_ops, "Chelsio PE9000" },
496 F_GPIO2_OEN | F_GPIO4_OEN |
497 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
498 &mi1_mdio_ops, "Chelsio T302" },
500 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
501 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
502 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
503 &mi1_mdio_ext_ops, "Chelsio T310" },
505 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
506 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
507 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
508 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
509 &mi1_mdio_ext_ops, "Chelsio T320" },
511 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
512 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
513 { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
514 &mi1_mdio_ops, "Chelsio T304" },
517 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
518 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
519 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
520 &mi1_mdio_ext_ops, "Chelsio N310" }
524 * Return the adapter_info structure with a given index. Out-of-range indices
527 const struct adapter_info *t3_get_adapter_info(unsigned int id)
529 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
532 struct port_type_info {
533 int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
534 const struct mdio_ops *ops);
537 static struct port_type_info port_types[] = {
539 { t3_ael1002_phy_prep },
540 { t3_vsc8211_phy_prep },
541 { t3_mv88e1xxx_phy_prep },
542 { t3_xaui_direct_phy_prep },
543 { t3_ael2005_phy_prep },
544 { t3_qt2045_phy_prep },
545 { t3_ael1006_phy_prep },
546 { t3_tn1010_phy_prep },
549 #define VPD_ENTRY(name, len) \
550 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
553 * Partial EEPROM Vital Product Data structure. Includes only the ID and
562 VPD_ENTRY(pn, 16); /* part number */
563 VPD_ENTRY(ec, ECNUM_LEN); /* EC level */
564 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
565 VPD_ENTRY(na, 12); /* MAC address base */
566 VPD_ENTRY(cclk, 6); /* core clock */
567 VPD_ENTRY(mclk, 6); /* mem clock */
568 VPD_ENTRY(uclk, 6); /* uP clk */
569 VPD_ENTRY(mdc, 6); /* MDIO clk */
570 VPD_ENTRY(mt, 2); /* mem timing */
571 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
572 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
573 VPD_ENTRY(port0, 2); /* PHY0 complex */
574 VPD_ENTRY(port1, 2); /* PHY1 complex */
575 VPD_ENTRY(port2, 2); /* PHY2 complex */
576 VPD_ENTRY(port3, 2); /* PHY3 complex */
577 VPD_ENTRY(rv, 1); /* csum */
578 u32 pad; /* for multiple-of-4 sizing and alignment */
581 #define EEPROM_MAX_POLL 40
582 #define EEPROM_STAT_ADDR 0x4000
583 #define VPD_BASE 0xc00
586 * t3_seeprom_read - read a VPD EEPROM location
587 * @adapter: adapter to read
588 * @addr: EEPROM address
589 * @data: where to store the read data
591 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
592 * VPD ROM capability. A zero is written to the flag bit when the
593 * address is written to the control register. The hardware device will
594 * set the flag to 1 when 4 bytes have been read into the data register.
596 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
599 int attempts = EEPROM_MAX_POLL;
600 unsigned int base = adapter->params.pci.vpd_cap_addr;
602 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
605 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
608 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
609 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
611 if (!(val & PCI_VPD_ADDR_F)) {
612 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
615 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
616 *data = le32_to_cpu(*data);
621 * t3_seeprom_write - write a VPD EEPROM location
622 * @adapter: adapter to write
623 * @addr: EEPROM address
624 * @data: value to write
626 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
627 * VPD ROM capability.
629 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
632 int attempts = EEPROM_MAX_POLL;
633 unsigned int base = adapter->params.pci.vpd_cap_addr;
635 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
638 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
640 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
641 (u16)addr | PCI_VPD_ADDR_F);
644 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
645 } while ((val & PCI_VPD_ADDR_F) && --attempts);
647 if (val & PCI_VPD_ADDR_F) {
648 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
655 * t3_seeprom_wp - enable/disable EEPROM write protection
656 * @adapter: the adapter
657 * @enable: 1 to enable write protection, 0 to disable it
659 * Enables or disables write protection on the serial EEPROM.
661 int t3_seeprom_wp(adapter_t *adapter, int enable)
663 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
667 * Convert a character holding a hex digit to a number.
669 static unsigned int hex2int(unsigned char c)
671 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
675 * get_vpd_params - read VPD parameters from VPD EEPROM
676 * @adapter: adapter to read
677 * @p: where to store the parameters
679 * Reads card parameters stored in VPD EEPROM.
681 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
687 * Card information is normally at VPD_BASE but some early cards had
690 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
693 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
695 for (i = 0; i < sizeof(vpd); i += 4) {
696 ret = t3_seeprom_read(adapter, addr + i,
697 (u32 *)((u8 *)&vpd + i));
702 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
703 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
704 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
705 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
706 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
707 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
708 memcpy(p->ec, vpd.ec_data, ECNUM_LEN);
710 /* Old eeproms didn't have port information */
711 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
712 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
713 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
715 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
716 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
717 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
718 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
719 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
720 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
723 for (i = 0; i < 6; i++)
724 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
725 hex2int(vpd.na_data[2 * i + 1]);
729 /* BIOS boot header */
730 typedef struct boot_header_s {
731 u8 signature[2]; /* signature */
732 u8 length; /* image length (include header) */
733 u8 offset[4]; /* initialization vector */
734 u8 reserved[19]; /* reserved */
735 u8 exheader[2]; /* offset to expansion header */
738 /* serial flash and firmware constants */
740 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
741 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
742 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
744 /* flash command opcodes */
745 SF_PROG_PAGE = 2, /* program page */
746 SF_WR_DISABLE = 4, /* disable writes */
747 SF_RD_STATUS = 5, /* read status register */
748 SF_WR_ENABLE = 6, /* enable writes */
749 SF_RD_DATA_FAST = 0xb, /* read flash */
750 SF_ERASE_SECTOR = 0xd8, /* erase sector */
752 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
753 OLD_FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
754 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
755 FW_MIN_SIZE = 8, /* at least version and csum */
756 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
758 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
759 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
760 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
761 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
762 BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment */
766 * sf1_read - read data from the serial flash
767 * @adapter: the adapter
768 * @byte_cnt: number of bytes to read
769 * @cont: whether another operation will be chained
770 * @valp: where to store the read data
772 * Reads up to 4 bytes of data from the serial flash. The location of
773 * the read needs to be specified prior to calling this by issuing the
774 * appropriate commands to the serial flash.
776 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
781 if (!byte_cnt || byte_cnt > 4)
783 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
785 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
786 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
788 *valp = t3_read_reg(adapter, A_SF_DATA);
793 * sf1_write - write data to the serial flash
794 * @adapter: the adapter
795 * @byte_cnt: number of bytes to write
796 * @cont: whether another operation will be chained
797 * @val: value to write
799 * Writes up to 4 bytes of data to the serial flash. The location of
800 * the write needs to be specified prior to calling this by issuing the
801 * appropriate commands to the serial flash.
803 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
806 if (!byte_cnt || byte_cnt > 4)
808 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
810 t3_write_reg(adapter, A_SF_DATA, val);
811 t3_write_reg(adapter, A_SF_OP,
812 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
813 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
817 * flash_wait_op - wait for a flash operation to complete
818 * @adapter: the adapter
819 * @attempts: max number of polls of the status register
820 * @delay: delay between polls in ms
822 * Wait for a flash operation to complete by polling the status register.
824 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
830 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
831 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
843 * t3_read_flash - read words from serial flash
844 * @adapter: the adapter
845 * @addr: the start address for the read
846 * @nwords: how many 32-bit words to read
847 * @data: where to store the read data
848 * @byte_oriented: whether to store data as bytes or as words
850 * Read the specified number of 32-bit words from the serial flash.
851 * If @byte_oriented is set the read data is stored as a byte array
852 * (i.e., big-endian), otherwise as 32-bit words in the platform's
855 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
856 u32 *data, int byte_oriented)
860 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
863 addr = swab32(addr) | SF_RD_DATA_FAST;
865 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
866 (ret = sf1_read(adapter, 1, 1, data)) != 0)
869 for ( ; nwords; nwords--, data++) {
870 ret = sf1_read(adapter, 4, nwords > 1, data);
874 *data = htonl(*data);
880 * t3_write_flash - write up to a page of data to the serial flash
881 * @adapter: the adapter
882 * @addr: the start address to write
883 * @n: length of data to write
884 * @data: the data to write
885 * @byte_oriented: whether to store data as bytes or as words
887 * Writes up to a page of data (256 bytes) to the serial flash starting
888 * at the given address.
889 * If @byte_oriented is set the write data is stored as a 32-bit
890 * big-endian array, otherwise in the processor's native endianness.
893 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
894 unsigned int n, const u8 *data,
899 unsigned int c, left, val, offset = addr & 0xff;
901 if (addr + n > SF_SIZE || offset + n > 256)
904 val = swab32(addr) | SF_PROG_PAGE;
906 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
907 (ret = sf1_write(adapter, 4, 1, val)) != 0)
910 for (left = n; left; left -= c) {
912 val = *(const u32*)data;
917 ret = sf1_write(adapter, c, c != left, val);
921 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
924 /* Read the page to verify the write succeeded */
925 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
930 if (memcmp(data - n, (u8 *)buf + offset, n))
936 * t3_get_tp_version - read the tp sram version
937 * @adapter: the adapter
938 * @vers: where to place the version
940 * Reads the protocol sram version from sram.
942 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
946 /* Get version loaded in SRAM */
947 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
948 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
953 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
959 * t3_check_tpsram_version - read the tp sram version
960 * @adapter: the adapter
963 int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
967 unsigned int major, minor;
969 if (adapter->params.rev == T3_REV_A)
974 ret = t3_get_tp_version(adapter, &vers);
978 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
980 major = G_TP_VERSION_MAJOR(vers);
981 minor = G_TP_VERSION_MINOR(vers);
983 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
986 if (major != TP_VERSION_MAJOR)
987 CH_ERR(adapter, "found wrong TP version (%u.%u), "
988 "driver needs version %d.%d\n", major, minor,
989 TP_VERSION_MAJOR, TP_VERSION_MINOR);
992 CH_ERR(adapter, "found wrong TP version (%u.%u), "
993 "driver compiled for version %d.%d\n", major, minor,
994 TP_VERSION_MAJOR, TP_VERSION_MINOR);
1000 * t3_check_tpsram - check if provided protocol SRAM
1001 * is compatible with this driver
1002 * @adapter: the adapter
1003 * @tp_sram: the firmware image to write
1006 * Checks if an adapter's tp sram is compatible with the driver.
1007 * Returns 0 if the versions are compatible, a negative error otherwise.
1009 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1013 const u32 *p = (const u32 *)tp_sram;
1015 /* Verify checksum */
1016 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1017 csum += ntohl(p[i]);
1018 if (csum != 0xffffffff) {
1019 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1027 enum fw_version_type {
1033 * t3_get_fw_version - read the firmware version
1034 * @adapter: the adapter
1035 * @vers: where to place the version
1037 * Reads the FW version from flash.
1039 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1041 int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1043 if (!ret && *vers != 0xffffffff)
1046 return t3_read_flash(adapter, OLD_FW_VERS_ADDR, 1, vers, 0);
1050 * t3_check_fw_version - check if the FW is compatible with this driver
1051 * @adapter: the adapter
1053 * Checks if an adapter's FW is compatible with the driver. Returns 0
1054 * if the versions are compatible, a negative error otherwise.
1056 int t3_check_fw_version(adapter_t *adapter, int *must_load)
1060 unsigned int type, major, minor;
1063 ret = t3_get_fw_version(adapter, &vers);
1067 type = G_FW_VERSION_TYPE(vers);
1068 major = G_FW_VERSION_MAJOR(vers);
1069 minor = G_FW_VERSION_MINOR(vers);
1071 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1072 minor == FW_VERSION_MINOR)
1075 if (major != FW_VERSION_MAJOR)
1076 CH_ERR(adapter, "found wrong FW version(%u.%u), "
1077 "driver needs version %u.%u\n", major, minor,
1078 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1079 else if ((int)minor < FW_VERSION_MINOR) {
1081 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1082 "driver compiled for version %u.%u\n", major, minor,
1083 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085 CH_WARN(adapter, "found newer FW version(%u.%u), "
1086 "driver compiled for version %u.%u\n", major, minor,
1087 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1094 * t3_flash_erase_sectors - erase a range of flash sectors
1095 * @adapter: the adapter
1096 * @start: the first sector to erase
1097 * @end: the last sector to erase
1099 * Erases the sectors in the given range.
1101 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1103 while (start <= end) {
1106 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1107 (ret = sf1_write(adapter, 4, 0,
1108 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1109 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1117 * t3_load_fw - download firmware
1118 * @adapter: the adapter
1119 * @fw_data: the firmware image to write
1122 * Write the supplied firmware image to the card's serial flash.
1123 * The FW image has the following sections: @size - 8 bytes of code and
1124 * data, followed by 4 bytes of FW version, followed by the 32-bit
1125 * 1's complement checksum of the whole image.
1127 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1131 const u32 *p = (const u32 *)fw_data;
1132 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1134 if ((size & 3) || size < FW_MIN_SIZE)
1136 if (size - 8 > FW_MAX_SIZE)
1139 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1140 csum += ntohl(p[i]);
1141 if (csum != 0xffffffff) {
1142 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1147 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1151 size -= 8; /* trim off version and checksum */
1152 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1153 unsigned int chunk_size = min(size, 256U);
1155 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1160 fw_data += chunk_size;
1164 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
1167 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1172 * t3_load_boot - download boot flash
1173 * @adapter: the adapter
1174 * @boot_data: the boot image to write
1177 * Write the supplied boot image to the card's serial flash.
1178 * The boot image has the following sections: a 28-byte header and the
1181 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1183 boot_header_t *header = (boot_header_t *)boot_data;
1186 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1187 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1190 * Perform some primitive sanity testing to avoid accidentally
1191 * writing garbage over the boot sectors. We ought to check for
1192 * more but it's not worth it for now ...
1194 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1195 CH_ERR(adapter, "boot image too small/large\n");
1198 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1199 CH_ERR(adapter, "boot image missing signature\n");
1202 if (header->length * BOOT_SIZE_INC != size) {
1203 CH_ERR(adapter, "boot image header length != image length\n");
1207 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1211 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1212 unsigned int chunk_size = min(size, 256U);
1214 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1219 boot_data += chunk_size;
1225 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1229 #define CIM_CTL_BASE 0x2000
1232 * t3_cim_ctl_blk_read - read a block from CIM control region
1233 * @adap: the adapter
1234 * @addr: the start address within the CIM control region
1235 * @n: number of words to read
1236 * @valp: where to store the result
1238 * Reads a block of 4-byte words from the CIM control region.
1240 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
/* Refuse to start while a previous host access is still in flight. */
1245 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/*
 * One 32-bit word per iteration: post the CIM-control-region address,
 * wait for HOSTBUSY to clear, then latch the data register.  Stops
 * early if a wait times out (ret becomes non-zero).
 */
1248 for ( ; !ret && n--; addr += 4) {
1249 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1250 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1253 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1259 * t3_link_changed - handle interface link changes
1260 * @adapter: the adapter
1261 * @port_id: the port index that changed link state
1263 * Called when a port's link settings change to propagate the new values
1264 * to the associated PHY and MAC. After performing the common tasks it
1265 * invokes an OS-specific handler.
1267 void t3_link_changed(adapter_t *adapter, int port_id)
1269 int link_ok, speed, duplex, fc;
1270 struct port_info *pi = adap2pinfo(adapter, port_id);
1271 struct cphy *phy = &pi->phy;
1272 struct cmac *mac = &pi->mac;
1273 struct link_config *lc = &pi->link_config;
/* Ask the PHY for the current negotiated link parameters. */
1275 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
/* With autonegotiated pause, honor only the flow-control modes we asked for. */
1277 if (lc->requested_fc & PAUSE_AUTONEG)
1278 fc &= lc->requested_fc;
1280 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Fast path: nothing to propagate if all parameters are unchanged. */
1282 if (link_ok == lc->link_ok && speed == lc->speed &&
1283 duplex == lc->duplex && fc == lc->fc)
1284 return; /* nothing changed */
/* On rev > 0 XAUI adapters, gate MAC TX/RX activity with the link state. */
1286 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1287 uses_xaui(adapter)) {
1290 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1291 link_ok ? F_TXACTENABLE | F_RXEN : 0);
/* Record the new state; negative speed/duplex mean "unknown". */
1293 lc->link_ok = (unsigned char)link_ok;
1294 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1295 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1297 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1298 /* Set MAC speed, duplex, and flow control to match PHY. */
1299 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1300 lc->fc = (unsigned char)fc;
/* Hand off to the OS-specific notification hook. */
1303 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1307 * t3_link_start - apply link configuration to MAC/PHY
1308 * @phy: the PHY to setup
1309 * @mac: the MAC to setup
1310 * @lc: the requested link configuration
1312 * Set up a port's MAC and PHY according to a desired link configuration.
1313 * - If the PHY can auto-negotiate first decide what to advertise, then
1314 * enable/disable auto-negotiation as desired, and reset.
1315 * - If the PHY does not auto-negotiate just reset it.
1316 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1317 * otherwise do it later based on the outcome of auto-negotiation.
1319 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
/* Flow control actually requested, restricted to the RX/TX pause bits. */
1321 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg-capable PHY: rebuild the pause advertisement from requested fc. */
1324 if (lc->supported & SUPPORTED_Autoneg) {
1325 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1327 lc->advertising |= ADVERTISED_Asym_Pause;
1329 lc->advertising |= ADVERTISED_Pause;
1331 phy->ops->advertise(phy, lc->advertising);
/* Forced mode: pin MAC and PHY to the requested speed/duplex now. */
1333 if (lc->autoneg == AUTONEG_DISABLE) {
1334 lc->speed = lc->requested_speed;
1335 lc->duplex = lc->requested_duplex;
1336 lc->fc = (unsigned char)fc;
1337 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1339 /* Also disables autoneg */
1340 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
/* Autoneg path: enable negotiation; final MAC settings arrive later
 * via t3_link_changed. */
1342 phy->ops->autoneg_enable(phy);
/* NOTE(review): -1/-1 presumably means "leave speed/duplex unchanged"
 * until negotiation completes — confirm in t3_mac_set_speed_duplex_fc. */
1344 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1345 lc->fc = (unsigned char)fc;
1346 phy->ops->reset(phy, 0);
1352 * t3_set_vlan_accel - control HW VLAN extraction
1353 * @adapter: the adapter
1354 * @ports: bitmap of adapter ports to operate on
1355 * @on: enable (1) or disable (0) HW VLAN extraction
1357 * Enables or disables HW extraction of VLAN tags for the given port.
1359 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
/* Per-port VLAN extraction enable bits live in one field of TP_OUT_CONFIG;
 * set or clear only the bits for the requested ports. */
1361 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1362 ports << S_VLANEXTRACTIONENABLE,
1363 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1367 unsigned int mask; /* bits to check in interrupt status */
1368 const char *msg; /* message to print or NULL */
1369 short stat_idx; /* stat counter to increment or -1 */
1370 unsigned short fatal; /* whether the condition reported is fatal */
1374 * t3_handle_intr_status - table driven interrupt handler
1375 * @adapter: the adapter that generated the interrupt
1376 * @reg: the interrupt status register to process
1377 * @mask: a mask to apply to the interrupt status
1378 * @acts: table of interrupt actions
1379 * @stats: statistics counters tracking interrupt occurrences
1381 * A table driven interrupt handler that applies a set of masks to an
1382 * interrupt status word and performs the corresponding actions if the
1383 * interrupts described by the mask have occurred. The actions include
1384 * optionally printing a warning or alert message, and optionally
1385 * incrementing a stat counter. The table is terminated by an entry
1386 * specifying mask 0. Returns the number of fatal interrupt conditions.
1388 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1390 const struct intr_info *acts,
1391 unsigned long *stats)
/* Only the masked-in cause bits are considered and later cleared. */
1394 unsigned int status = t3_read_reg(adapter, reg) & mask;
/* The action table is terminated by an entry with mask 0. */
1396 for ( ; acts->mask; ++acts) {
1397 if (!(status & acts->mask)) continue;
/* Fatal conditions are reported at alert level (and traced). */
1400 CH_ALERT(adapter, "%s (0x%x)\n",
1401 acts->msg, status & acts->mask);
1402 CTR2(KTR_CXGB, "%s (0x%x)\n",
1403 acts->msg, status & acts->mask);
1404 } else if (acts->msg)
1405 CH_WARN(adapter, "%s (0x%x)\n",
1406 acts->msg, status & acts->mask);
/* stat_idx < 0 means "no counter" for this condition. */
1407 if (acts->stat_idx >= 0)
1408 stats[acts->stat_idx]++;
1410 if (status) /* clear processed interrupts */
1411 t3_write_reg(adapter, reg, status);
1415 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1416 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1417 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1418 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1419 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1420 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1422 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1423 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1425 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1426 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1427 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1428 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1429 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1430 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1431 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1432 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1433 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1434 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1435 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1436 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1437 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1438 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1439 F_TXPARERR | V_BISTERR(M_BISTERR))
1440 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1441 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1442 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1443 #define ULPTX_INTR_MASK 0xfc
1444 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1445 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1446 F_ZERO_SWITCH_ERROR)
1447 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1448 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1449 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1450 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1451 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1452 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1453 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1454 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1455 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1456 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1457 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1458 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1459 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1460 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1461 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1462 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1463 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1464 V_MCAPARERRENB(M_MCAPARERRENB))
1465 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1466 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1467 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1468 F_MPS0 | F_CPL_SWITCH)
1471 * Interrupt handler for the PCIX1 module.
1473 static void pci_intr_handler(adapter_t *adapter)
/* Maps PCIX1 cause bits to messages; only correctable ECC is non-fatal
 * (it increments STAT_PCI_CORR_ECC instead). */
1475 static struct intr_info pcix1_intr_info[] = {
1476 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1477 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1478 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1479 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1480 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1481 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1482 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1483 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1484 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1486 { F_DETCORECCERR, "PCI correctable ECC error",
1487 STAT_PCI_CORR_ECC, 0 },
1488 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1489 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1490 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1492 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1494 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1496 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
/* Any fatal condition in the table wedges the adapter. */
1501 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1502 pcix1_intr_info, adapter->irq_stats))
1503 t3_fatal_err(adapter);
1507 * Interrupt handler for the PCIE module.
1509 static void pcie_intr_handler(adapter_t *adapter)
/* PCIe cause bits; every listed condition is fatal. */
1511 static struct intr_info pcie_intr_info[] = {
1512 { F_PEXERR, "PCI PEX error", -1, 1 },
1514 "PCI unexpected split completion DMA read error", -1, 1 },
1516 "PCI unexpected split completion DMA command error", -1, 1 },
1517 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1518 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1519 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1520 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1521 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1522 "PCI MSI-X table/PBA parity error", -1, 1 },
1523 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1524 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1525 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1526 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1527 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
/* PEX errors carry an extra code in a side register; report it first. */
1531 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1532 CH_ALERT(adapter, "PEX error code 0x%x\n",
1533 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1535 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1536 pcie_intr_info, adapter->irq_stats))
1537 t3_fatal_err(adapter);
1541 * TP interrupt handler.
1543 static void tp_intr_handler(adapter_t *adapter)
/* Pre-T3C cause layout: parity bits in 0xffffff, page-exhaustion above. */
1545 static struct intr_info tp_intr_info[] = {
1546 { 0xffffff, "TP parity error", -1, 1 },
1547 { 0x1000000, "TP out of Rx pages", -1, 1 },
1548 { 0x2000000, "TP out of Tx pages", -1, 1 },
/* T3C revisions widen the parity field and use named FL-empty bits. */
1551 static struct intr_info tp_intr_info_t3c[] = {
1552 { 0x1fffffff, "TP parity error", -1, 1 },
1553 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1554 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
/* Select the table matching the silicon revision. */
1558 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1559 adapter->params.rev < T3_REV_C ?
1560 tp_intr_info : tp_intr_info_t3c, NULL))
1561 t3_fatal_err(adapter);
1565 * CIM interrupt handler.
1567 static void cim_intr_handler(adapter_t *adapter)
/* CIM cause bits: illegal accesses and parity errors, all fatal. */
1569 static struct intr_info cim_intr_info[] = {
1570 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1571 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1572 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1573 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1574 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1575 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1576 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1577 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1578 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1579 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1580 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1581 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1582 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1583 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1584 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1585 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1586 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1587 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1588 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1589 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1590 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1591 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1592 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1593 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1597 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1598 cim_intr_info, NULL))
1599 t3_fatal_err(adapter);
1603 * ULP RX interrupt handler.
1605 static void ulprx_intr_handler(adapter_t *adapter)
/* ULP RX parity/framing errors — all fatal. */
1607 static struct intr_info ulprx_intr_info[] = {
1608 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1609 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1610 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1611 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1612 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1613 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1614 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1615 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1619 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1620 ulprx_intr_info, NULL))
1621 t3_fatal_err(adapter);
1625 * ULP TX interrupt handler.
1627 static void ulptx_intr_handler(adapter_t *adapter)
/* PBL out-of-bounds conditions are counted but non-fatal; parity is fatal. */
1629 static struct intr_info ulptx_intr_info[] = {
1630 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1631 STAT_ULP_CH0_PBL_OOB, 0 },
1632 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1633 STAT_ULP_CH1_PBL_OOB, 0 },
1634 { 0xfc, "ULP TX parity error", -1, 1 },
1638 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1639 ulptx_intr_info, adapter->irq_stats))
1640 t3_fatal_err(adapter);
1643 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1644 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1645 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1646 F_ICSPI1_TX_FRAMING_ERROR)
1647 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1648 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1649 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1650 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1653 * PM TX interrupt handler.
1655 static void pmtx_intr_handler(adapter_t *adapter)
/* PM TX framing/parity errors — all fatal. */
1657 static struct intr_info pmtx_intr_info[] = {
1658 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1659 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1660 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1661 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1662 "PMTX ispi parity error", -1, 1 },
1663 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1664 "PMTX ospi parity error", -1, 1 },
1668 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1669 pmtx_intr_info, NULL))
1670 t3_fatal_err(adapter);
1673 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1674 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1675 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1676 F_IESPI1_TX_FRAMING_ERROR)
1677 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1678 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1679 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1680 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1683 * PM RX interrupt handler.
1685 static void pmrx_intr_handler(adapter_t *adapter)
/* PM RX framing/parity errors — all fatal (mirrors pmtx_intr_handler). */
1687 static struct intr_info pmrx_intr_info[] = {
1688 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1689 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1690 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1691 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1692 "PMRX ispi parity error", -1, 1 },
1693 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1694 "PMRX ospi parity error", -1, 1 },
1698 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1699 pmrx_intr_info, NULL))
1700 t3_fatal_err(adapter);
1704 * CPL switch interrupt handler.
1706 static void cplsw_intr_handler(adapter_t *adapter)
/* CPL switch error conditions — all fatal. */
1708 static struct intr_info cplsw_intr_info[] = {
1709 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1710 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1711 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1712 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1713 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1714 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1718 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1719 cplsw_intr_info, NULL))
1720 t3_fatal_err(adapter);
1724 * MPS interrupt handler.
1726 static void mps_intr_handler(adapter_t *adapter)
/* Any MPS parity error (low 9 cause bits) is fatal. */
1728 static struct intr_info mps_intr_info[] = {
1729 { 0x1ff, "MPS parity error", -1, 1 },
1733 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1734 mps_intr_info, NULL))
1735 t3_fatal_err(adapter);
1738 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1741 * MC7 interrupt handler.
1743 static void mc7_intr_handler(struct mc7 *mc7)
1745 adapter_t *adapter = mc7->adapter;
1746 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable error: count it and log the failing address/data. */
1749 mc7->stats.corr_err++;
1750 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1751 "data 0x%x 0x%x 0x%x\n", mc7->name,
1752 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1753 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1754 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1755 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable error: alert with the UE address/data registers. */
1759 mc7->stats.uncorr_err++;
1760 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1761 "data 0x%x 0x%x 0x%x\n", mc7->name,
1762 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1763 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1764 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1765 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity error: the failing bits are encoded in the cause register. */
1769 mc7->stats.parity_err++;
1770 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1771 mc7->name, G_PE(cause));
/* Address error: ERR_ADDR is only read on rev > 0 parts —
 * NOTE(review): presumably the register is absent on rev 0; confirm. */
1777 if (adapter->params.rev > 0)
1778 addr = t3_read_reg(adapter,
1779 mc7->offset + A_MC7_ERR_ADDR);
1780 mc7->stats.addr_err++;
1781 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
/* UE, PE and AE are fatal; clear the cause bits in any case. */
1785 if (cause & MC7_INTR_FATAL)
1786 t3_fatal_err(adapter);
1788 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1791 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1792 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1794 * XGMAC interrupt handler.
1796 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1801 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1802 mac = &adap2pinfo(adap, idx)->mac;
1803 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
/* FIFO parity errors are counted and reported; they are in XGM_INTR_FATAL. */
1805 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1806 mac->stats.tx_fifo_parity_err++;
1807 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1809 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1810 mac->stats.rx_fifo_parity_err++;
1811 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
/* Remaining conditions are only counted in the MAC statistics. */
1813 if (cause & F_TXFIFO_UNDERRUN)
1814 mac->stats.tx_fifo_urun++;
1815 if (cause & F_RXFIFO_OVERFLOW)
1816 mac->stats.rx_fifo_ovfl++;
1817 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1818 mac->stats.serdes_signal_loss++;
1819 if (cause & F_XAUIPCSCTCERR)
1820 mac->stats.xaui_pcs_ctc_err++;
1821 if (cause & F_XAUIPCSALIGNCHANGE)
1822 mac->stats.xaui_pcs_align_change++;
/* Acknowledge everything we saw, then escalate fatal conditions. */
1824 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1825 if (cause & XGM_INTR_FATAL)
1831 * Interrupt handler for PHY events.
1833 int t3_phy_intr_handler(adapter_t *adapter)
1835 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1837 for_each_port(adapter, i) {
1838 struct port_info *p = adap2pinfo(adapter, i);
/* Skip ports whose PHY does not signal interrupts. */
1840 if (!(p->phy.caps & SUPPORTED_IRQ))
/* Each port's PHY interrupt arrives on a dedicated GPIO line. */
1843 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1844 int phy_cause = p->phy.ops->intr_handler(&p->phy);
/* Fan out the PHY-reported causes: link change, FIFO error,
 * transceiver module change. */
1846 if (phy_cause & cphy_cause_link_change)
1847 t3_link_changed(adapter, i);
1848 if (phy_cause & cphy_cause_fifo_error)
1849 p->phy.fifo_errors++;
1850 if (phy_cause & cphy_cause_module_change)
1851 t3_os_phymod_changed(adapter, i);
/* Acknowledge the GPIO causes we serviced. */
1855 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1860 * t3_slow_intr_handler - control path interrupt handler
1861 * @adapter: the adapter
1863 * T3 interrupt handler for non-data interrupt events, e.g., errors.
1864 * The designation 'slow' is because it involves register reads, while
1865 * data interrupts typically don't involve any MMIOs.
1867 int t3_slow_intr_handler(adapter_t *adapter)
1869 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Only service causes we actually enabled. */
1871 cause &= adapter->slow_intr_mask;
/* Dispatch each pending module's handler. */
1874 if (cause & F_PCIM0) {
/* The PCI cause handler depends on the bus flavor of this adapter. */
1875 if (is_pcie(adapter))
1876 pcie_intr_handler(adapter);
1878 pci_intr_handler(adapter);
1881 t3_sge_err_intr_handler(adapter);
1882 if (cause & F_MC7_PMRX)
1883 mc7_intr_handler(&adapter->pmrx);
1884 if (cause & F_MC7_PMTX)
1885 mc7_intr_handler(&adapter->pmtx);
1886 if (cause & F_MC7_CM)
1887 mc7_intr_handler(&adapter->cm);
1889 cim_intr_handler(adapter);
1891 tp_intr_handler(adapter);
1892 if (cause & F_ULP2_RX)
1893 ulprx_intr_handler(adapter);
1894 if (cause & F_ULP2_TX)
1895 ulptx_intr_handler(adapter);
1896 if (cause & F_PM1_RX)
1897 pmrx_intr_handler(adapter);
1898 if (cause & F_PM1_TX)
1899 pmtx_intr_handler(adapter);
1900 if (cause & F_CPL_SWITCH)
1901 cplsw_intr_handler(adapter);
1903 mps_intr_handler(adapter);
1905 t3_mc5_intr_handler(&adapter->mc5);
1906 if (cause & F_XGMAC0_0)
1907 mac_intr_handler(adapter, 0);
1908 if (cause & F_XGMAC0_1)
1909 mac_intr_handler(adapter, 1);
/* T3DBG carries the GPIO/PHY interrupts; handled by the OS layer. */
1910 if (cause & F_T3DBG)
1911 t3_os_ext_intr_handler(adapter);
1913 /* Clear the interrupts just processed. */
1914 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1915 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/* Build the T3DBG GPIO interrupt-enable mask: one bit per port whose PHY
 * signals interrupts and has a GPIO line assigned. */
1919 static unsigned int calc_gpio_intr(adapter_t *adap)
1921 unsigned int i, gpi_intr = 0;
1923 for_each_port(adap, i)
1924 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1925 adapter_info(adap)->gpio_intr[i])
1926 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1931 * t3_intr_enable - enable interrupts
1932 * @adapter: the adapter whose interrupts should be enabled
1934 * Enable interrupts by setting the interrupt enable registers of the
1935 * various HW modules and then enabling the top-level interrupt
1938 void t3_intr_enable(adapter_t *adapter)
/* Register/mask pairs written in bulk below; the MC7 enables reuse the
 * PMRX-relative register offset rebased to the PMTX and CM instances. */
1940 static struct addr_val_pair intr_en_avp[] = {
1941 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1942 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1944 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1946 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1947 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1948 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1949 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1950 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1951 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1954 adapter->slow_intr_mask = PL_INTR_MASK;
1956 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* TP and SGE enables differ by silicon revision.
 * NOTE(review): the raw TP masks (0x2bfffff / 0x3bfffff) are magic —
 * their bit meanings are not visible in this file. */
1957 t3_write_reg(adapter, A_TP_INT_ENABLE,
1958 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1959 t3_write_reg(adapter, A_SG_INT_ENABLE,
1960 adapter->params.rev >= T3_REV_C ?
1961 SGE_INTR_MASK | F_FLEMPTY : SGE_INTR_MASK);
/* rev > 0 parts gain extra CPL-switch and ULP TX conditions. */
1963 if (adapter->params.rev > 0) {
1964 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1965 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1966 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1967 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1968 F_PBL_BOUND_ERR_CH1);
1970 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1971 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
/* Enable GPIO (PHY) interrupts and the bus-appropriate PCI causes. */
1974 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1976 if (is_pcie(adapter))
1977 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1979 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the write. */
1980 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1981 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1985 * t3_intr_disable - disable a card's interrupts
1986 * @adapter: the adapter whose interrupts should be disabled
1988 * Disable interrupts. We only disable the top-level interrupt
1989 * concentrator and the SGE data interrupts.
1991 void t3_intr_disable(adapter_t *adapter)
/* Closing the top-level concentrator suffices; module enables are left set. */
1993 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1994 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1995 adapter->slow_intr_mask = 0;
1999 * t3_intr_clear - clear all interrupts
2000 * @adapter: the adapter whose interrupts should be cleared
2002 * Clears all interrupts.
2004 void t3_intr_clear(adapter_t *adapter)
/* Cause registers cleared wholesale with an all-ones write. */
2006 static const unsigned int cause_reg_addr[] = {
2008 A_SG_RSPQ_FL_STATUS,
2011 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2012 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2013 A_CIM_HOST_INT_CAUSE,
2026 /* Clear PHY and MAC interrupts for each port. */
2027 for_each_port(adapter, i)
2028 t3_port_intr_clear(adapter, i);
2030 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2031 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
/* PCIe parts also latch PEX error state in a separate register. */
2033 if (is_pcie(adapter))
2034 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2035 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2036 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2040 * t3_port_intr_enable - enable port-specific interrupts
2041 * @adapter: associated adapter
2042 * @idx: index of port whose interrupts should be enabled
2044 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2047 void t3_port_intr_enable(adapter_t *adapter, int idx)
2049 struct port_info *pi = adap2pinfo(adapter, idx);
/* Enable the port's MAC causes, then delegate to the PHY driver. */
2051 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2052 pi->phy.ops->intr_enable(&pi->phy);
2056 * t3_port_intr_disable - disable port-specific interrupts
2057 * @adapter: associated adapter
2058 * @idx: index of port whose interrupts should be disabled
2060 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2063 void t3_port_intr_disable(adapter_t *adapter, int idx)
2065 struct port_info *pi = adap2pinfo(adapter, idx);
/* Mask the port's MAC causes, then delegate to the PHY driver. */
2067 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2068 pi->phy.ops->intr_disable(&pi->phy);
2072 * t3_port_intr_clear - clear port-specific interrupts
2073 * @adapter: associated adapter
2074 * @idx: index of port whose interrupts to clear
2076 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2079 void t3_port_intr_clear(adapter_t *adapter, int idx)
2081 struct port_info *pi = adap2pinfo(adapter, idx);
/* Acknowledge all pending MAC causes, then clear the PHY's. */
2083 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2084 pi->phy.ops->intr_clear(&pi->phy);
2087 #define SG_CONTEXT_CMD_ATTEMPTS 100
2090 * t3_sge_write_context - write an SGE context
2091 * @adapter: the adapter
2092 * @id: the context id
2093 * @type: the context type
2095 * Program an SGE context with the values already loaded in the
2096 * CONTEXT_DATA? registers.
2098 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
/* All-ones masks: every bit of the four CONTEXT_DATA words is written. */
2101 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2102 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2103 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2104 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* Opcode 1 programs context @id of the given type; then poll until
 * the command's BUSY bit clears. */
2105 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2106 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2107 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2108 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/* Zero all four data words and program them into context @id of @type. */
2111 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2113 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2114 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2115 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2116 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2117 return t3_sge_write_context(adap, id, type);
2121 * t3_sge_init_ecntxt - initialize an SGE egress context
2122 * @adapter: the adapter to configure
2123 * @id: the context id
2124 * @gts_enable: whether to enable GTS for the context
2125 * @type: the egress context type
2126 * @respq: associated response queue
2127 * @base_addr: base address of queue
2128 * @size: number of queue entries
2130 * @gen: initial generation value for the context
2131 * @cidx: consumer pointer
2133 * Initialize an SGE egress context and make it ready for use. If the
2134 * platform allows concurrent context operations, the caller is
2135 * responsible for appropriate locking.
2137 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2138 enum sge_context_type type, int respq, u64 base_addr,
2139 unsigned int size, unsigned int token, int gen,
/* Offload queues start with no credits; others start with FW_WR_NUM. */
2142 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2144 if (base_addr & 0xfff) /* must be 4K aligned */
/* Don't touch the data registers while a context command is pending. */
2146 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the egress context image across the four data words. */
2150 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2151 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2152 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2153 V_EC_BASE_LO((u32)base_addr & 0xffff));
2155 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2157 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2158 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2159 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2161 return t3_sge_write_context(adapter, id, F_EGRESS);
2165 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2166 * @adapter: the adapter to configure
2167 * @id: the context id
2168 * @gts_enable: whether to enable GTS for the context
2169 * @base_addr: base address of queue
2170 * @size: number of queue entries
2171 * @bsize: size of each buffer for this queue
2172 * @cong_thres: threshold to signal congestion to upstream producers
2173 * @gen: initial generation value for the context
2174 * @cidx: consumer pointer
2176 * Initialize an SGE free list context and make it ready for use. The
2177 * caller is responsible for ensuring only one context operation occurs
2180 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2181 u64 base_addr, unsigned int size, unsigned int bsize,
2182 unsigned int cong_thres, int gen, unsigned int cidx)
2184 if (base_addr & 0xfff) /* must be 4K aligned */
/* Don't touch the data registers while a context command is pending. */
2186 if (t3_read_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr) & F_CONTEXT_CMD_BUSY)
/* Stage the free-list context image: base address split across words 0/1,
 * consumer index split into LO/HI fields, buffer size split across 2/3. */
2190 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2192 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2193 V_FL_BASE_HI((u32)base_addr) |
2194 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2195 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2196 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2197 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2198 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2199 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2200 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2201 return t3_sge_write_context(adapter, id, F_FREELIST);
2205 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2206 * @adapter: the adapter to configure
2207 * @id: the context id
2208 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2209 * @base_addr: base address of queue
2210 * @size: number of queue entries
2211 * @fl_thres: threshold for selecting the normal or jumbo free list
2212 * @gen: initial generation value for the context
2213 * @cidx: consumer pointer
2215 * Initialize an SGE response queue context and make it ready for use.
2216 * The caller is responsible for ensuring only one context operation
2219 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2220 u64 base_addr, unsigned int size,
2221 unsigned int fl_thres, int gen, unsigned int cidx)
2223 unsigned int intr = 0;
2225 if (base_addr & 0xfff) /* must be 4K aligned */
2227 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2231 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2233 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2235 if (irq_vec_idx >= 0)
2236 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2237 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2238 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2239 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2240 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2244 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2245 * @adapter: the adapter to configure
2246 * @id: the context id
2247 * @base_addr: base address of queue
2248 * @size: number of queue entries
2249 * @rspq: response queue for async notifications
2250 * @ovfl_mode: CQ overflow mode
2251 * @credits: completion queue credits
2252 * @credit_thres: the credit threshold
2254 * Initialize an SGE completion queue context and make it ready for use.
2255 * The caller is responsible for ensuring only one context operation
2258 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2259 unsigned int size, int rspq, int ovfl_mode,
2260 unsigned int credits, unsigned int credit_thres)
2262 if (base_addr & 0xfff) /* must be 4K aligned */
2264 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2268 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2269 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2271 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2272 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2273 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2274 V_CQ_ERR(ovfl_mode));
2275 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2276 V_CQ_CREDIT_THRES(credit_thres));
2277 return t3_sge_write_context(adapter, id, F_CQ);
2281 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2282 * @adapter: the adapter
2283 * @id: the egress context id
2284 * @enable: enable (1) or disable (0) the context
2286 * Enable or disable an SGE egress context. The caller is responsible for
2287 * ensuring only one context operation occurs at a time.
2289 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2291 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2294 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2295 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2296 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2297 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2298 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2299 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2300 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2301 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2302 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2306 * t3_sge_disable_fl - disable an SGE free-buffer list
2307 * @adapter: the adapter
2308 * @id: the free list context id
2310 * Disable an SGE free-buffer list. The caller is responsible for
2311 * ensuring only one context operation occurs at a time.
2313 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2315 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2318 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2319 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2320 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2321 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2322 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2323 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2324 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2325 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2326 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2330 * t3_sge_disable_rspcntxt - disable an SGE response queue
2331 * @adapter: the adapter
2332 * @id: the response queue context id
2334 * Disable an SGE response queue. The caller is responsible for
2335 * ensuring only one context operation occurs at a time.
2337 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2339 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2342 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2343 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2344 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2345 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2346 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2347 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2348 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2349 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2350 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2354 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2355 * @adapter: the adapter
2356 * @id: the completion queue context id
2358 * Disable an SGE completion queue. The caller is responsible for
2359 * ensuring only one context operation occurs at a time.
2361 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2363 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2366 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2367 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2368 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2369 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2370 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2371 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2372 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2373 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2374 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2378 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2379 * @adapter: the adapter
2380 * @id: the context id
2381 * @op: the operation to perform
2382 * @credits: credits to return to the CQ
2384 * Perform the selected operation on an SGE completion queue context.
2385 * The caller is responsible for ensuring only one context operation
2388 * For most operations the function returns the current HW position in
2389 * the completion queue.
2391 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2392 unsigned int credits)
2396 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2399 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2400 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2401 V_CONTEXT(id) | F_CQ);
2402 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2403 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2406 if (op >= 2 && op < 7) {
2407 if (adapter->params.rev > 0)
2408 return G_CQ_INDEX(val);
2410 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2411 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2412 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2413 F_CONTEXT_CMD_BUSY, 0,
2414 SG_CONTEXT_CMD_ATTEMPTS, 1))
2416 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2422 * t3_sge_read_context - read an SGE context
2423 * @type: the context type
2424 * @adapter: the adapter
2425 * @id: the context id
2426 * @data: holds the retrieved context
2428 * Read an SGE egress context. The caller is responsible for ensuring
2429 * only one context operation occurs at a time.
2431 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2432 unsigned int id, u32 data[4])
2434 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2437 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2438 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2439 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2440 SG_CONTEXT_CMD_ATTEMPTS, 1))
2442 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2443 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2444 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2445 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2450 * t3_sge_read_ecntxt - read an SGE egress context
2451 * @adapter: the adapter
2452 * @id: the context id
2453 * @data: holds the retrieved context
2455 * Read an SGE egress context. The caller is responsible for ensuring
2456 * only one context operation occurs at a time.
2458 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2462 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2466 * t3_sge_read_cq - read an SGE CQ context
2467 * @adapter: the adapter
2468 * @id: the context id
2469 * @data: holds the retrieved context
2471 * Read an SGE CQ context. The caller is responsible for ensuring
2472 * only one context operation occurs at a time.
2474 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2478 return t3_sge_read_context(F_CQ, adapter, id, data);
2482 * t3_sge_read_fl - read an SGE free-list context
2483 * @adapter: the adapter
2484 * @id: the context id
2485 * @data: holds the retrieved context
2487 * Read an SGE free-list context. The caller is responsible for ensuring
2488 * only one context operation occurs at a time.
2490 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2492 if (id >= SGE_QSETS * 2)
2494 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2498 * t3_sge_read_rspq - read an SGE response queue context
2499 * @adapter: the adapter
2500 * @id: the context id
2501 * @data: holds the retrieved context
2503 * Read an SGE response queue context. The caller is responsible for
2504 * ensuring only one context operation occurs at a time.
2506 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2508 if (id >= SGE_QSETS)
2510 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2514 * t3_config_rss - configure Rx packet steering
2515 * @adapter: the adapter
2516 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2517 * @cpus: values for the CPU lookup table (0xff terminated)
2518 * @rspq: values for the response queue lookup table (0xffff terminated)
2520 * Programs the receive packet steering logic. @cpus and @rspq provide
2521 * the values for the CPU and response queue lookup tables. If they
2522 * provide fewer values than the size of the tables the supplied values
2523 * are used repeatedly until the tables are fully populated.
2525 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2528 int i, j, cpu_idx = 0, q_idx = 0;
2531 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2534 for (j = 0; j < 2; ++j) {
2535 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2536 if (cpus[cpu_idx] == 0xff)
2539 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2543 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2544 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2545 (i << 16) | rspq[q_idx++]);
2546 if (rspq[q_idx] == 0xffff)
2550 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2554 * t3_read_rss - read the contents of the RSS tables
2555 * @adapter: the adapter
2556 * @lkup: holds the contents of the RSS lookup table
2557 * @map: holds the contents of the RSS map table
2559 * Reads the contents of the receive packet steering tables.
2561 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2567 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2568 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2570 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2571 if (!(val & 0x80000000))
2574 *lkup++ = (u8)(val >> 8);
2578 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2579 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2581 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2582 if (!(val & 0x80000000))
2590 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2591 * @adap: the adapter
2592 * @enable: 1 to select offload mode, 0 for regular NIC
2594 * Switches TP to NIC/offload mode.
2596 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2598 if (is_offload(adap) || !enable)
2599 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2600 V_NICMODE(!enable));
2604 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2605 * @adap: the adapter
2606 * @addr: the indirect TP register address
2607 * @mask: specifies the field within the register to modify
2608 * @val: new value for the field
2610 * Sets a field of an indirect TP register to the given value.
2612 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2613 unsigned int mask, unsigned int val)
2615 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2616 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2617 t3_write_reg(adap, A_TP_PIO_DATA, val);
2621 * t3_enable_filters - enable the HW filters
2622 * @adap: the adapter
2624 * Enables the HW filters for NIC traffic.
2626 void t3_enable_filters(adapter_t *adap)
2628 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
2629 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
2630 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
2631 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;	/* round down to a multiple of 24 */
}
/*
 * Program a CM memory-region base register and advance the running offset.
 * Deliberately not do{}while(0)-wrapped: callers rely on 'start' advancing.
 */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
2656 * partition_mem - partition memory and configure TP memory settings
2657 * @adap: the adapter
2658 * @p: the TP parameters
2660 * Partitions context and payload memory and configures TP's memory
2663 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2665 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2666 unsigned int timers = 0, timers_shift = 22;
2668 if (adap->params.rev > 0) {
2669 if (tids <= 16 * 1024) {
2672 } else if (tids <= 64 * 1024) {
2675 } else if (tids <= 256 * 1024) {
2681 t3_write_reg(adap, A_TP_PMM_SIZE,
2682 p->chan_rx_size | (p->chan_tx_size >> 16));
2684 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2685 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2686 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2687 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2688 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2690 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2691 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2692 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2694 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2695 /* Add a bit of headroom and make multiple of 24 */
2697 pstructs -= pstructs % 24;
2698 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2700 m = tids * TCB_SIZE;
2701 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2702 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2703 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2704 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2705 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2706 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2707 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2708 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2710 m = (m + 4095) & ~0xfff;
2711 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2712 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2714 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2715 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2716 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2718 adap->params.mc5.nservers += m - tids;
2721 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2723 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2724 t3_write_reg(adap, A_TP_PIO_DATA, val);
2727 static void tp_config(adapter_t *adap, const struct tp_params *p)
2729 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2730 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2731 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2732 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2733 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2734 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2735 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2736 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2737 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2738 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2739 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2740 F_IPV6ENABLE | F_NICMODE);
2741 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2742 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2743 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2744 adap->params.rev > 0 ? F_ENABLEESND :
2746 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2748 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2749 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2750 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2751 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2752 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2753 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2754 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2756 if (adap->params.rev > 0) {
2757 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2758 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
2759 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
2760 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2761 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
2762 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
2763 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
2765 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2767 if (adap->params.rev == T3_REV_C)
2768 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2769 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2770 V_TABLELATENCYDELTA(4));
2772 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2773 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2774 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2775 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2777 if (adap->params.nports > 2) {
2778 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2779 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
2780 F_ENABLERXPORTFROMADDR);
2781 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2782 V_RXMAPMODE(M_RXMAPMODE), 0);
2783 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2784 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2785 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2786 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2787 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2788 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2789 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
/* TCP timer values in ms */
#define TP_DACK_TIMER 50	/* delayed-ACK timer */
#define TP_RTO_MIN    250	/* minimum retransmit timeout */
2798 * tp_set_timers - set TP timing parameters
2799 * @adap: the adapter to set
2800 * @core_clk: the core clock frequency in Hz
2802 * Set TP's timing parameters, such as the various timer resolutions and
2803 * the TCP timer values.
2805 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2807 unsigned int tre = adap->params.tp.tre;
2808 unsigned int dack_re = adap->params.tp.dack_re;
2809 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2810 unsigned int tps = core_clk >> tre;
2812 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2813 V_DELAYEDACKRESOLUTION(dack_re) |
2814 V_TIMESTAMPRESOLUTION(tstamp_re));
2815 t3_write_reg(adap, A_TP_DACK_TIMER,
2816 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2817 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2818 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2819 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2820 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2821 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2822 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2823 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2826 #define SECONDS * tps
2828 t3_write_reg(adap, A_TP_MSL,
2829 adap->params.rev > 0 ? 0 : 2 SECONDS);
2830 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2831 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2832 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2833 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2834 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2835 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2836 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2837 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2842 #ifdef CONFIG_CHELSIO_T3_CORE
2844 * t3_tp_set_coalescing_size - set receive coalescing size
2845 * @adap: the adapter
2846 * @size: the receive coalescing size
2847 * @psh: whether a set PSH bit should deliver coalesced data
2849 * Set the receive coalescing size and PSH bit handling.
2851 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2855 if (size > MAX_RX_COALESCING_LEN)
2858 val = t3_read_reg(adap, A_TP_PARA_REG3);
2859 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2862 val |= F_RXCOALESCEENABLE;
2864 val |= F_RXCOALESCEPSHEN;
2865 size = min(MAX_RX_COALESCING_LEN, size);
2866 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2867 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2869 t3_write_reg(adap, A_TP_PARA_REG3, val);
2874 * t3_tp_set_max_rxsize - set the max receive size
2875 * @adap: the adapter
2876 * @size: the max receive size
2878 * Set TP's max receive size. This is the limit that applies when
2879 * receive coalescing is disabled.
2881 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2883 t3_write_reg(adap, A_TP_PARA_REG7,
2884 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2887 static void __devinit init_mtus(unsigned short mtus[])
2890 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2891 * it can accomodate max size TCP/IP headers when SACK and timestamps
2892 * are enabled and still have at least 8 bytes of payload.
2913 * init_cong_ctrl - initialize congestion control parameters
2914 * @a: the alpha values for congestion control
2915 * @b: the beta values for congestion control
2917 * Initialize the congestion control parameters.
2919 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2921 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2946 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2949 b[13] = b[14] = b[15] = b[16] = 3;
2950 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2951 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U
2960 * t3_load_mtus - write the MTU and congestion control HW tables
2961 * @adap: the adapter
2962 * @mtus: the unrestricted values for the MTU table
2963 * @alpha: the values for the congestion control alpha parameter
2964 * @beta: the values for the congestion control beta parameter
2965 * @mtu_cap: the maximum permitted effective MTU
2967 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2968 * Update the high-speed congestion control table with the supplied alpha,
2971 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2972 unsigned short alpha[NCCTRL_WIN],
2973 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2975 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2976 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2977 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2978 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2982 for (i = 0; i < NMTUS; ++i) {
2983 unsigned int mtu = min(mtus[i], mtu_cap);
2984 unsigned int log2 = fls(mtu);
2986 if (!(mtu & ((1 << log2) >> 2))) /* round */
2988 t3_write_reg(adap, A_TP_MTU_TABLE,
2989 (i << 24) | (log2 << 16) | mtu);
2991 for (w = 0; w < NCCTRL_WIN; ++w) {
2994 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2997 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2998 (w << 16) | (beta[w] << 13) | inc);
3004 * t3_read_hw_mtus - returns the values in the HW MTU table
3005 * @adap: the adapter
3006 * @mtus: where to store the HW MTU values
3008 * Reads the HW MTU table.
3010 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3014 for (i = 0; i < NMTUS; ++i) {
3017 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3018 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3019 mtus[i] = val & 0x3fff;
3024 * t3_get_cong_cntl_tab - reads the congestion control table
3025 * @adap: the adapter
3026 * @incr: where to store the alpha values
3028 * Reads the additive increments programmed into the HW congestion
3031 void t3_get_cong_cntl_tab(adapter_t *adap,
3032 unsigned short incr[NMTUS][NCCTRL_WIN])
3034 unsigned int mtu, w;
3036 for (mtu = 0; mtu < NMTUS; ++mtu)
3037 for (w = 0; w < NCCTRL_WIN; ++w) {
3038 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3039 0xffff0000 | (mtu << 5) | w);
3040 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3041 A_TP_CCTRL_TABLE) & 0x1fff;
3046 * t3_tp_get_mib_stats - read TP's MIB counters
3047 * @adap: the adapter
3048 * @tps: holds the returned counter values
3050 * Returns the values of TP's MIB counters.
3052 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3054 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3055 sizeof(*tps) / sizeof(u32), 0);
3059 * t3_read_pace_tbl - read the pace table
3060 * @adap: the adapter
3061 * @pace_vals: holds the returned values
3063 * Returns the values of TP's pace table in nanoseconds.
3065 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3067 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3069 for (i = 0; i < NTX_SCHED; i++) {
3070 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3071 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3076 * t3_set_pace_tbl - set the pace table
3077 * @adap: the adapter
3078 * @pace_vals: the pace values in nanoseconds
3079 * @start: index of the first entry in the HW pace table to set
3080 * @n: how many entries to set
3082 * Sets (a subset of the) HW pace table.
3084 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3085 unsigned int start, unsigned int n)
3087 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3089 for ( ; n; n--, start++, pace_vals++)
3090 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3091 ((*pace_vals + tick_ns / 2) / tick_ns));
/*
 * Program a ULP RX region's lower/upper limits and advance the running
 * offset; ulptx_region programs a ULP TX region without advancing it.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len)

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
3105 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3107 unsigned int m = p->chan_rx_size;
3109 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3110 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3111 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3112 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3113 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3114 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3115 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3116 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3121 * t3_set_proto_sram - set the contents of the protocol sram
3122 * @adapter: the adapter
3123 * @data: the protocol image
3125 * Write the contents of the protocol SRAM.
3127 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3130 const u32 *buf = (const u32 *)data;
3132 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3133 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3134 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3135 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3136 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3137 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3139 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3140 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3148 * t3_config_trace_filter - configure one of the tracing filters
3149 * @adapter: the adapter
3150 * @tp: the desired trace filter parameters
3151 * @filter_index: which filter to configure
3152 * @invert: if set non-matching packets are traced instead of matching ones
3153 * @enable: whether to enable or disable the filter
3155 * Configures one of the tracing filters available in HW.
3157 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3158 int filter_index, int invert, int enable)
3160 u32 addr, key[4], mask[4];
3162 key[0] = tp->sport | (tp->sip << 16);
3163 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3165 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3167 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3168 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3169 mask[2] = tp->dip_mask;
3170 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3173 key[3] |= (1 << 29);
3175 key[3] |= (1 << 28);
3177 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3178 tp_wr_indirect(adapter, addr++, key[0]);
3179 tp_wr_indirect(adapter, addr++, mask[0]);
3180 tp_wr_indirect(adapter, addr++, key[1]);
3181 tp_wr_indirect(adapter, addr++, mask[1]);
3182 tp_wr_indirect(adapter, addr++, key[2]);
3183 tp_wr_indirect(adapter, addr++, mask[2]);
3184 tp_wr_indirect(adapter, addr++, key[3]);
3185 tp_wr_indirect(adapter, addr, mask[3]);
3186 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3190 * t3_config_sched - configure a HW traffic scheduler
3191 * @adap: the adapter
3192 * @kbps: target rate in Kbps
3193 * @sched: the scheduler index
3195 * Configure a Tx HW scheduler for the target rate.
3197 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3199 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3200 unsigned int clk = adap->params.vpd.cclk * 1000;
3201 unsigned int selected_cpt = 0, selected_bpt = 0;
3204 kbps *= 125; /* -> bytes */
3205 for (cpt = 1; cpt <= 255; cpt++) {
3207 bpt = (kbps + tps / 2) / tps;
3208 if (bpt > 0 && bpt <= 255) {
3210 delta = v >= kbps ? v - kbps : kbps - v;
3211 if (delta < mindelta) {
3216 } else if (selected_cpt)
3222 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3223 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3224 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3226 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3228 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3229 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3234 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3235 * @adap: the adapter
3236 * @sched: the scheduler index
3237 * @ipg: the interpacket delay in tenths of nanoseconds
3239 * Set the interpacket delay for a HW packet rate scheduler.
3241 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3243 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3245 /* convert ipg to nearest number of core clocks */
3246 ipg *= core_ticks_per_usec(adap);
3247 ipg = (ipg + 5000) / 10000;
3251 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3252 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3254 v = (v & 0xffff) | (ipg << 16);
3256 v = (v & 0xffff0000) | ipg;
3257 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3258 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3263 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3264 * @adap: the adapter
3265 * @sched: the scheduler index
3266 * @kbps: the byte rate in Kbps
3267 * @ipg: the interpacket delay in tenths of nanoseconds
3269 * Return the current configuration of a HW Tx scheduler.
3271 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3274 unsigned int v, addr, bpt, cpt;
3277 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3278 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3279 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3282 bpt = (v >> 8) & 0xff;
3285 *kbps = 0; /* scheduler disabled */
3287 v = (adap->params.vpd.cclk * 1000) / cpt;
3288 *kbps = (v * bpt) / 125;
3292 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3293 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3294 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3298 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3303 * tp_init - configure TP
3304 * @adap: the adapter
3305 * @p: TP configuration parameters
3307 * Initializes the TP HW module.
3309 static int tp_init(adapter_t *adap, const struct tp_params *p)
3314 t3_set_vlan_accel(adap, 3, 0);
3316 if (is_offload(adap)) {
3317 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3318 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3319 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3322 CH_ERR(adap, "TP initialization timed out\n");
3326 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3331 * t3_mps_set_active_ports - configure port failover
3332 * @adap: the adapter
3333 * @port_mask: bitmap of active ports
3335 * Sets the active ports according to the supplied bitmap.
3337 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3339 if (port_mask & ~((1 << adap->params.nports) - 1))
3341 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3342 port_mask << S_PORT0ACTIVE);
3347 * chan_init_hw - channel-dependent HW initialization
3348 * @adap: the adapter
3349 * @chan_map: bitmap of Tx channels being used
3351 * Perform the bits of HW initialization that are dependent on the Tx
3352 * channels being used.
3354 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3358 if (chan_map != 3) { /* one channel */
3359 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3360 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3361 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3362 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3363 F_TPTXPORT1EN | F_PORT1ACTIVE));
3364 t3_write_reg(adap, A_PM1_TX_CFG,
3365 chan_map == 1 ? 0xffffffff : 0);
3367 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3368 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3369 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3370 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3371 } else { /* two channels */
3372 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3373 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3374 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3375 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3376 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3377 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3379 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3380 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3381 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3382 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3383 for (i = 0; i < 16; i++)
3384 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3385 (i << 16) | 0x1010);
3386 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3387 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3391 static int calibrate_xgm(adapter_t *adapter)
3393 if (uses_xaui(adapter)) {
3396 for (i = 0; i < 5; ++i) {
3397 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3398 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3400 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3401 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3402 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3403 V_XAUIIMP(G_CALIMP(v) >> 2));
3407 CH_ERR(adapter, "MAC calibration failed\n");
3410 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3411 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3412 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3413 F_XGM_IMPSETUPDATE);
3418 static void calibrate_xgm_t3b(adapter_t *adapter)
3420 if (!uses_xaui(adapter)) {
3421 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3422 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3423 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3424 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3425 F_XGM_IMPSETUPDATE);
3426 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3428 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3429 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* MC7 DRAM timing parameters, all expressed in memory-clock cycles. */
struct mc7_timing_params {
	unsigned char ActToPreDly;   /* active-to-precharge delay */
	unsigned char ActToRdWrDly;  /* active-to-read/write delay */
	unsigned char PreCyc;        /* precharge cycles */
	unsigned char RefCyc[5];     /* refresh cycles, indexed by density */
	unsigned char BkCyc;         /* bank cycles */
	unsigned char WrToRdDly;     /* write-to-read delay */
	unsigned char RdToWrDly;     /* read-to-write delay */
};
3444 * Write a value to a register and check that the write completed. These
3445 * writes normally complete in a cycle or two, so one read should suffice.
3446 * The very first read exists to flush the posted write to the device.
3448 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3450 t3_write_reg(adapter, addr, val);
3451 (void) t3_read_reg(adapter, addr); /* flush */
3452 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3454 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3458 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3460 static const unsigned int mc7_mode[] = {
3461 0x632, 0x642, 0x652, 0x432, 0x442
3463 static const struct mc7_timing_params mc7_timings[] = {
3464 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3465 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3466 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3467 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3468 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3472 unsigned int width, density, slow, attempts;
3473 adapter_t *adapter = mc7->adapter;
3474 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3479 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3480 slow = val & F_SLOW;
3481 width = G_WIDTH(val);
3482 density = G_DEN(val);
3484 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3485 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3489 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3490 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3492 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3493 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3494 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3500 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3501 V_ACTTOPREDLY(p->ActToPreDly) |
3502 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3503 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3504 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3506 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3507 val | F_CLKEN | F_TERM150);
3508 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3511 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3516 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3517 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3518 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3519 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3523 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3524 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3529 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3530 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3531 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3532 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3533 mc7_mode[mem_type]) ||
3534 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3535 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3538 /* clock value is in KHz */
3539 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3540 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3542 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3543 F_PERREFEN | V_PREREFDIV(mc7_clock));
3544 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3546 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3547 F_ECCGENEN | F_ECCCHKEN);
3548 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3549 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3550 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3551 (mc7->size << width) - 1);
3552 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3553 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3558 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3559 } while ((val & F_BUSY) && --attempts);
3561 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3565 /* Enable normal memory accesses. */
3566 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3573 static void config_pcie(adapter_t *adap)
3575 static const u16 ack_lat[4][6] = {
3576 { 237, 416, 559, 1071, 2095, 4143 },
3577 { 128, 217, 289, 545, 1057, 2081 },
3578 { 73, 118, 154, 282, 538, 1050 },
3579 { 67, 107, 86, 150, 278, 534 }
3581 static const u16 rpl_tmr[4][6] = {
3582 { 711, 1248, 1677, 3213, 6285, 12429 },
3583 { 384, 651, 867, 1635, 3171, 6243 },
3584 { 219, 354, 462, 846, 1614, 3150 },
3585 { 201, 321, 258, 450, 834, 1602 }
3589 unsigned int log2_width, pldsize;
3590 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3592 t3_os_pci_read_config_2(adap,
3593 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3595 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3597 t3_os_pci_read_config_2(adap,
3598 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3601 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3602 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3603 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3604 log2_width = fls(adap->params.pci.width) - 1;
3605 acklat = ack_lat[log2_width][pldsize];
3606 if (val & 1) /* check LOsEnable */
3607 acklat += fst_trn_tx * 4;
3608 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3610 if (adap->params.rev == 0)
3611 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3612 V_T3A_ACKLAT(M_T3A_ACKLAT),
3613 V_T3A_ACKLAT(acklat));
3615 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3618 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3619 V_REPLAYLMT(rpllmt));
3621 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3622 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3623 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3627 * t3_init_hw - initialize and configure T3 HW modules
3628 * @adapter: the adapter
3629 * @fw_params: initial parameters to pass to firmware (optional)
3631 * Initialize and configure T3 HW modules. This performs the
3632 * initialization steps that need to be done once after a card is reset.
 *	MAC and PHY initialization is handled separately whenever a port is
3636 * @fw_params are passed to FW and their value is platform dependent.
3637 * Only the top 8 bits are available for use, the rest must be 0.
3639 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3641 int err = -EIO, attempts, i;
3642 const struct vpd_params *vpd = &adapter->params.vpd;
3644 if (adapter->params.rev > 0)
3645 calibrate_xgm_t3b(adapter);
3646 else if (calibrate_xgm(adapter))
3649 if (adapter->params.nports > 2)
3650 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3653 partition_mem(adapter, &adapter->params.tp);
3655 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3656 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3657 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3658 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3659 adapter->params.mc5.nfilters,
3660 adapter->params.mc5.nroutes))
3663 for (i = 0; i < 32; i++)
3664 if (clear_sge_ctxt(adapter, i, F_CQ))
3668 if (tp_init(adapter, &adapter->params.tp))
3671 #ifdef CONFIG_CHELSIO_T3_CORE
3672 t3_tp_set_coalescing_size(adapter,
3673 min(adapter->params.sge.max_pkt_size,
3674 MAX_RX_COALESCING_LEN), 1);
3675 t3_tp_set_max_rxsize(adapter,
3676 min(adapter->params.sge.max_pkt_size, 16384U));
3677 ulp_config(adapter, &adapter->params.tp);
3679 if (is_pcie(adapter))
3680 config_pcie(adapter);
3682 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3683 F_DMASTOPEN | F_CLIDECEN);
3685 if (adapter->params.rev == T3_REV_C)
3686 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3687 F_CFG_CQE_SOP_MASK);
3689 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3690 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3691 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3692 chan_init_hw(adapter, adapter->params.chan_map);
3693 t3_sge_init(adapter, &adapter->params.sge);
3695 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3697 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3698 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3699 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3700 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3703 do { /* wait for uP to initialize */
3705 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3707 CH_ERR(adapter, "uP initialization timed out\n");
3717 * get_pci_mode - determine a card's PCI mode
3718 * @adapter: the adapter
3719 * @p: where to store the PCI settings
3721 * Determines a card's PCI mode and associated parameters, such as speed
3724 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3726 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3727 u32 pci_mode, pcie_cap;
3729 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3733 p->variant = PCI_VARIANT_PCIE;
3734 p->pcie_cap_addr = pcie_cap;
3735 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3737 p->width = (val >> 4) & 0x3f;
3741 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3742 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3743 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3744 pci_mode = G_PCIXINITPAT(pci_mode);
3746 p->variant = PCI_VARIANT_PCI;
3747 else if (pci_mode < 4)
3748 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3749 else if (pci_mode < 8)
3750 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3752 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3756 * init_link_config - initialize a link's SW state
3757 * @lc: structure holding the link state
3758 * @caps: link capabilities
3760 * Initializes the SW state maintained for each link, including the link's
3761 * capabilities and default speed/duplex/flow-control/autonegotiation
3764 static void __devinit init_link_config(struct link_config *lc,
3767 lc->supported = caps;
3768 lc->requested_speed = lc->speed = SPEED_INVALID;
3769 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3770 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3771 if (lc->supported & SUPPORTED_Autoneg) {
3772 lc->advertising = lc->supported;
3773 lc->autoneg = AUTONEG_ENABLE;
3774 lc->requested_fc |= PAUSE_AUTONEG;
3776 lc->advertising = 0;
3777 lc->autoneg = AUTONEG_DISABLE;
3782 * mc7_calc_size - calculate MC7 memory size
3783 * @cfg: the MC7 configuration
3785 * Calculates the size of an MC7 memory in bytes from the value of its
3786 * configuration register.
3788 static unsigned int __devinit mc7_calc_size(u32 cfg)
3790 unsigned int width = G_WIDTH(cfg);
3791 unsigned int banks = !!(cfg & F_BKS) + 1;
3792 unsigned int org = !!(cfg & F_ORG) + 1;
3793 unsigned int density = G_DEN(cfg);
3794 unsigned int MBs = ((256 << density) * banks) / (org << width);
3799 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3800 unsigned int base_addr, const char *name)
3804 mc7->adapter = adapter;
3806 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3807 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3808 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3809 mc7->width = G_WIDTH(cfg);
3812 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3814 mac->adapter = adapter;
3815 mac->multiport = adapter->params.nports > 2;
3816 if (mac->multiport) {
3817 mac->ext_port = (unsigned char)index;
3823 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3825 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3826 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3827 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3828 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3834 * early_hw_init - HW initialization done at card detection time
3835 * @adapter: the adapter
3836 * @ai: contains information about the adapter type and properties
 *	Performs the part of HW initialization that is done early on when the
 *	driver first detects the card.  Most of the HW state is initialized
3840 * lazily later on when a port or an offload function are first used.
3842 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3844 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3847 mi1_init(adapter, ai);
3848 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3849 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3850 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3851 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3852 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3853 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3855 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3858 /* Enable MAC clocks so we can access the registers */
3859 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3860 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3862 val |= F_CLKDIVRESET_;
3863 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3864 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3865 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3866 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3870 * t3_reset_adapter - reset the adapter
3871 * @adapter: the adapter
3873 * Reset the adapter.
3875 static int t3_reset_adapter(adapter_t *adapter)
3877 int i, save_and_restore_pcie =
3878 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3881 if (save_and_restore_pcie)
3882 t3_os_pci_save_state(adapter);
3883 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3886 * Delay. Give Some time to device to reset fully.
3887 * XXX The delay time should be modified.
3889 for (i = 0; i < 10; i++) {
3891 t3_os_pci_read_config_2(adapter, 0x00, &devid);
3892 if (devid == 0x1425)
3896 if (devid != 0x1425)
3899 if (save_and_restore_pcie)
3900 t3_os_pci_restore_state(adapter);
3904 static int init_parity(adapter_t *adap)
3908 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3911 for (err = i = 0; !err && i < 16; i++)
3912 err = clear_sge_ctxt(adap, i, F_EGRESS);
3913 for (i = 0xfff0; !err && i <= 0xffff; i++)
3914 err = clear_sge_ctxt(adap, i, F_EGRESS);
3915 for (i = 0; !err && i < SGE_QSETS; i++)
3916 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3920 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3921 for (i = 0; i < 4; i++)
3922 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3923 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3924 F_IBQDBGWR | V_IBQDBGQID(i) |
3925 V_IBQDBGADDR(addr));
3926 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3927 F_IBQDBGBUSY, 0, 2, 1);
3935 * t3_prep_adapter - prepare SW and HW for operation
3936 * @adapter: the adapter
3937 * @ai: contains information about the adapter type and properties
3939 * Initialize adapter SW state for the various HW modules, set initial
3940 * values for some adapter tunables, take PHYs out of reset, and
3941 * initialize the MDIO interface.
3943 int __devinit t3_prep_adapter(adapter_t *adapter,
3944 const struct adapter_info *ai, int reset)
3947 unsigned int i, j = 0;
3949 get_pci_mode(adapter, &adapter->params.pci);
3951 adapter->params.info = ai;
3952 adapter->params.nports = ai->nports0 + ai->nports1;
3953 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3954 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3955 adapter->params.linkpoll_period = 0;
3956 if (adapter->params.nports > 2)
3957 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3959 adapter->params.stats_update_period = is_10G(adapter) ?
3960 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3961 adapter->params.pci.vpd_cap_addr =
3962 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3964 ret = get_vpd_params(adapter, &adapter->params.vpd);
3968 if (reset && t3_reset_adapter(adapter))
3971 t3_sge_prep(adapter, &adapter->params.sge);
3973 if (adapter->params.vpd.mclk) {
3974 struct tp_params *p = &adapter->params.tp;
3976 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3977 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3978 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3980 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3981 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3982 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3983 p->cm_size = t3_mc7_size(&adapter->cm);
3984 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3985 p->chan_tx_size = p->pmtx_size / p->nchan;
3986 p->rx_pg_size = 64 * 1024;
3987 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3988 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3989 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3990 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3991 adapter->params.rev > 0 ? 12 : 6;
3992 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3994 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3997 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3998 t3_mc7_size(&adapter->pmtx) &&
3999 t3_mc7_size(&adapter->cm);
4001 if (is_offload(adapter)) {
4002 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4003 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
4004 DEFAULT_NFILTERS : 0;
4005 adapter->params.mc5.nroutes = 0;
4006 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4008 #ifdef CONFIG_CHELSIO_T3_CORE
4009 init_mtus(adapter->params.mtus);
4010 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4014 early_hw_init(adapter, ai);
4015 ret = init_parity(adapter);
4019 if (adapter->params.nports > 2 &&
4020 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4023 for_each_port(adapter, i) {
4025 const struct port_type_info *pti;
4026 struct port_info *p = adap2pinfo(adapter, i);
4028 while (!adapter->params.vpd.port_type[j])
4031 pti = &port_types[adapter->params.vpd.port_type[j]];
4032 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
4036 mac_prep(&p->mac, adapter, j);
4040 * The VPD EEPROM stores the base Ethernet address for the
4041 * card. A port's address is derived from the base by adding
4042 * the port's index to the base's low octet.
4044 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4045 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4047 t3_os_set_hw_addr(adapter, i, hw_addr);
4048 init_link_config(&p->link_config, p->phy.caps);
4049 p->phy.ops->power_down(&p->phy, 1);
4050 if (!(p->phy.caps & SUPPORTED_IRQ))
4051 adapter->params.linkpoll_period = 10;
4058 * t3_reinit_adapter - prepare HW for operation again
4059 * @adapter: the adapter
4061 * Put HW in the same state as @t3_prep_adapter without any changes to
4062 * SW state. This is a cut down version of @t3_prep_adapter intended
4063 * to be used after events that wipe out HW state but preserve SW state,
4064 * e.g., EEH. The device must be reset before calling this.
4066 int t3_reinit_adapter(adapter_t *adap)
4071 early_hw_init(adap, adap->params.info);
4072 ret = init_parity(adap);
4076 if (adap->params.nports > 2 &&
4077 (ret = t3_vsc7323_init(adap, adap->params.nports)))
4080 for_each_port(adap, i) {
4081 const struct port_type_info *pti;
4082 struct port_info *p = adap2pinfo(adap, i);
4084 while (!adap->params.vpd.port_type[++j])
4087 pti = &port_types[adap->params.vpd.port_type[j]];
4088 ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
4091 p->phy.ops->power_down(&p->phy, 1);
4096 void t3_led_ready(adapter_t *adapter)
4098 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4102 void t3_port_failover(adapter_t *adapter, int port)
4106 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4107 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4111 void t3_failover_done(adapter_t *adapter, int port)
4113 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4114 F_PORT0ACTIVE | F_PORT1ACTIVE);
4117 void t3_failover_clear(adapter_t *adapter)
4119 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4120 F_PORT0ACTIVE | F_PORT1ACTIVE);