1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <cxgb_include.h>
37 #include <dev/cxgb/cxgb_include.h>
/*
 * Debug trace macros: print entry/exit of the enclosing function.
 * Wrapped in do { } while (0) so they are safe as single statements in
 * unbraced if/else bodies (the original expansion carried a trailing ';',
 * which breaks "if (x) DENTER(); else ..."), and __func__ is the C99
 * standard spelling of the GCC-specific __FUNCTION__.
 */
#define DENTER() do { printf("entered %s\n", __func__); } while (0)
#define DEXIT() do { printf("exiting %s\n", __func__); } while (0)
45 * t3_wait_op_done_val - wait until an operation is completed
46 * @adapter: the adapter performing the operation
47 * @reg: the register to check for completion
48 * @mask: a single-bit field within @reg that indicates completion
49 * @polarity: the value of the field when the operation is completed
50 * @attempts: number of check iterations
51 * @delay: delay in usecs between iterations
52 * @valp: where to store the value of the register at completion time
54 * Wait until an operation is completed by checking a bit in a register
55 * up to @attempts times. If @valp is not NULL the value of the register
56 * at the time it indicated completion is stored there. Returns 0 if the
57 * operation completes and -EAGAIN otherwise.
59 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
60 int attempts, int delay, u32 *valp)
63 u32 val = t3_read_reg(adapter, reg);
65 if (!!(val & mask) == polarity) {
78 * t3_write_regs - write a bunch of registers
79 * @adapter: the adapter to program
80 * @p: an array of register address/register value pairs
81 * @n: the number of address/value pairs
82 * @offset: register address offset
84 * Takes an array of register address/register value pairs and writes each
85 * value to the corresponding register. Register addresses are adjusted
86 * by the supplied offset.
88 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
92 t3_write_reg(adapter, p->reg_addr + offset, p->val);
98 * t3_set_reg_field - set a register field to a value
99 * @adapter: the adapter to program
100 * @addr: the register address
101 * @mask: specifies the portion of the register to modify
102 * @val: the new value for the register field
104 * Sets a register field specified by the supplied mask to the
107 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
109 u32 v = t3_read_reg(adapter, addr) & ~mask;
111 t3_write_reg(adapter, addr, v | val);
112 (void) t3_read_reg(adapter, addr); /* flush */
116 * t3_read_indirect - read indirectly addressed registers
118 * @addr_reg: register holding the indirect address
119 * @data_reg: register holding the value of the indirect register
120 * @vals: where the read register values are stored
121 * @start_idx: index of first indirect register to read
122 * @nregs: how many indirect registers to read
124 * Reads registers that are accessed indirectly through an address/data
127 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
128 unsigned int data_reg, u32 *vals, unsigned int nregs,
129 unsigned int start_idx)
132 t3_write_reg(adap, addr_reg, start_idx);
133 *vals++ = t3_read_reg(adap, data_reg);
139 * t3_mc7_bd_read - read from MC7 through backdoor accesses
140 * @mc7: identifies MC7 to read from
141 * @start: index of first 64-bit word to read
142 * @n: number of 64-bit words to read
143 * @buf: where to store the read result
145 * Read n 64-bit words from MC7 starting at word start, using backdoor
148 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
151 static int shift[] = { 0, 0, 16, 24 };
152 static int step[] = { 0, 32, 16, 8 };
154 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
155 adapter_t *adap = mc7->adapter;
157 if (start >= size64 || start + n > size64)
160 start *= (8 << mc7->width);
165 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
169 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
171 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
172 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
173 while ((val & F_BUSY) && attempts--)
174 val = t3_read_reg(adap,
175 mc7->offset + A_MC7_BD_OP);
179 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
180 if (mc7->width == 0) {
181 val64 = t3_read_reg(adap,
182 mc7->offset + A_MC7_BD_DATA0);
183 val64 |= (u64)val << 32;
186 val >>= shift[mc7->width];
187 val64 |= (u64)val << (step[mc7->width] * i);
199 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
201 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
202 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
205 if (!(ai->caps & SUPPORTED_10000baseT_Full))
207 t3_write_reg(adap, A_MI1_CFG, val);
210 #define MDIO_ATTEMPTS 20
213 * MI1 read/write operations for direct-addressed PHYs.
215 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
216 int reg_addr, unsigned int *valp)
219 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
225 t3_write_reg(adapter, A_MI1_ADDR, addr);
226 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
227 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
229 *valp = t3_read_reg(adapter, A_MI1_DATA);
230 MDIO_UNLOCK(adapter);
234 static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
235 int reg_addr, unsigned int val)
238 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
244 t3_write_reg(adapter, A_MI1_ADDR, addr);
245 t3_write_reg(adapter, A_MI1_DATA, val);
246 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
247 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
248 MDIO_UNLOCK(adapter);
252 static struct mdio_ops mi1_mdio_ops = {
258 * MI1 read/write operations for indirect-addressed PHYs.
260 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
261 int reg_addr, unsigned int *valp)
264 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
267 t3_write_reg(adapter, A_MI1_ADDR, addr);
268 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
269 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
270 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
272 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
273 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
276 *valp = t3_read_reg(adapter, A_MI1_DATA);
278 MDIO_UNLOCK(adapter);
282 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
283 int reg_addr, unsigned int val)
286 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
289 t3_write_reg(adapter, A_MI1_ADDR, addr);
290 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
291 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
292 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
294 t3_write_reg(adapter, A_MI1_DATA, val);
295 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
296 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
299 MDIO_UNLOCK(adapter);
303 static struct mdio_ops mi1_mdio_ext_ops = {
309 * t3_mdio_change_bits - modify the value of a PHY register
310 * @phy: the PHY to operate on
311 * @mmd: the device address
312 * @reg: the register address
313 * @clear: what part of the register value to mask off
314 * @set: what part of the register value to set
316 * Changes the value of a PHY register by applying a mask to its current
317 * value and ORing the result with a new value.
/* Read PHY register @reg of device @mmd, clear the @clear bits, OR in @set,
 * and write the result back.  Returns the first mdio error, else 0. */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
334 * t3_phy_reset - reset a PHY block
335 * @phy: the PHY to operate on
336 * @mmd: the device address of the PHY block to reset
337 * @wait: how long to wait for the reset to complete in 1ms increments
339 * Resets a PHY block and optionally waits for the reset to complete.
340 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
343 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
348 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
353 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
359 } while (ctl && --wait);
365 * t3_phy_advertise - set the PHY advertisement registers for autoneg
366 * @phy: the PHY to operate on
367 * @advert: bitmap of capabilities the PHY should advertise
369 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
370 * requested capabilities.
372 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
375 unsigned int val = 0;
377 err = mdio_read(phy, 0, MII_CTRL1000, &val);
381 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
382 if (advert & ADVERTISED_1000baseT_Half)
383 val |= ADVERTISE_1000HALF;
384 if (advert & ADVERTISED_1000baseT_Full)
385 val |= ADVERTISE_1000FULL;
387 err = mdio_write(phy, 0, MII_CTRL1000, val);
392 if (advert & ADVERTISED_10baseT_Half)
393 val |= ADVERTISE_10HALF;
394 if (advert & ADVERTISED_10baseT_Full)
395 val |= ADVERTISE_10FULL;
396 if (advert & ADVERTISED_100baseT_Half)
397 val |= ADVERTISE_100HALF;
398 if (advert & ADVERTISED_100baseT_Full)
399 val |= ADVERTISE_100FULL;
400 if (advert & ADVERTISED_Pause)
401 val |= ADVERTISE_PAUSE_CAP;
402 if (advert & ADVERTISED_Asym_Pause)
403 val |= ADVERTISE_PAUSE_ASYM;
404 return mdio_write(phy, 0, MII_ADVERTISE, val);
408 * t3_set_phy_speed_duplex - force PHY speed and duplex
409 * @phy: the PHY to operate on
410 * @speed: requested PHY speed
411 * @duplex: requested PHY duplex
413 * Force a 10/100/1000 PHY's speed and duplex. This also disables
414 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
416 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
421 err = mdio_read(phy, 0, MII_BMCR, &ctl);
426 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
427 if (speed == SPEED_100)
428 ctl |= BMCR_SPEED100;
429 else if (speed == SPEED_1000)
430 ctl |= BMCR_SPEED1000;
433 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
434 if (duplex == DUPLEX_FULL)
435 ctl |= BMCR_FULLDPLX;
437 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
438 ctl |= BMCR_ANENABLE;
439 return mdio_write(phy, 0, MII_BMCR, ctl);
442 static struct adapter_info t3_adap_info[] = {
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
447 &mi1_mdio_ops, "Chelsio PE9000" },
449 F_GPIO2_OEN | F_GPIO4_OEN |
450 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
452 &mi1_mdio_ops, "Chelsio T302" },
454 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
455 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
456 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
457 &mi1_mdio_ext_ops, "Chelsio T310" },
459 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
460 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
461 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
462 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
463 &mi1_mdio_ext_ops, "Chelsio T320" },
465 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
466 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
467 F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
468 &mi1_mdio_ops, "Chelsio T304" },
472 * Return the adapter_info structure with a given index. Out-of-range indices
475 const struct adapter_info *t3_get_adapter_info(unsigned int id)
477 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
480 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
481 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
482 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
484 static struct port_type_info port_types[] = {
486 { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
488 { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
489 "10/100/1000BASE-T" },
490 { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
491 "10/100/1000BASE-T" },
492 { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
493 { NULL, CAPS_10G, "10GBASE-KX4" },
494 { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
495 { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
497 { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
503 #define VPD_ENTRY(name, len) \
504 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
507 * Partial EEPROM Vital Product Data structure. Includes only the ID and
516 VPD_ENTRY(pn, 16); /* part number */
517 VPD_ENTRY(ec, 16); /* EC level */
518 VPD_ENTRY(sn, 16); /* serial number */
519 VPD_ENTRY(na, 12); /* MAC address base */
520 VPD_ENTRY(cclk, 6); /* core clock */
521 VPD_ENTRY(mclk, 6); /* mem clock */
522 VPD_ENTRY(uclk, 6); /* uP clk */
523 VPD_ENTRY(mdc, 6); /* MDIO clk */
524 VPD_ENTRY(mt, 2); /* mem timing */
525 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
526 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
527 VPD_ENTRY(port0, 2); /* PHY0 complex */
528 VPD_ENTRY(port1, 2); /* PHY1 complex */
529 VPD_ENTRY(port2, 2); /* PHY2 complex */
530 VPD_ENTRY(port3, 2); /* PHY3 complex */
531 VPD_ENTRY(rv, 1); /* csum */
532 u32 pad; /* for multiple-of-4 sizing and alignment */
535 #define EEPROM_MAX_POLL 4
536 #define EEPROM_STAT_ADDR 0x4000
537 #define VPD_BASE 0xc00
540 * t3_seeprom_read - read a VPD EEPROM location
541 * @adapter: adapter to read
542 * @addr: EEPROM address
543 * @data: where to store the read data
545 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
546 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
548 * set the flag to 1 when 4 bytes have been read into the data register.
550 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
553 int attempts = EEPROM_MAX_POLL;
554 unsigned int base = adapter->params.pci.vpd_cap_addr;
556 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
559 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
562 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
563 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
565 if (!(val & PCI_VPD_ADDR_F)) {
566 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
569 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
570 *data = le32_to_cpu(*data);
575 * t3_seeprom_write - write a VPD EEPROM location
576 * @adapter: adapter to write
577 * @addr: EEPROM address
578 * @data: value to write
580 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
581 * VPD ROM capability.
583 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
586 int attempts = EEPROM_MAX_POLL;
587 unsigned int base = adapter->params.pci.vpd_cap_addr;
589 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
592 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
594 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
595 (u16)addr | PCI_VPD_ADDR_F);
598 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
599 } while ((val & PCI_VPD_ADDR_F) && --attempts);
601 if (val & PCI_VPD_ADDR_F) {
602 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
609 * t3_seeprom_wp - enable/disable EEPROM write protection
610 * @adapter: the adapter
611 * @enable: 1 to enable write protection, 0 to disable it
613 * Enables or disables write protection on the serial EEPROM.
615 int t3_seeprom_wp(adapter_t *adapter, int enable)
617 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
621 * Convert a character holding a hex digit to a number.
/* Convert one ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value
 * 0-15.  Behavior for non-hex characters is undefined by design (callers
 * pass VPD hex fields). */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
629 * get_vpd_params - read VPD parameters from VPD EEPROM
630 * @adapter: adapter to read
631 * @p: where to store the parameters
633 * Reads card parameters stored in VPD EEPROM.
635 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
641 * Card information is normally at VPD_BASE but some early cards had
644 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
647 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
649 for (i = 0; i < sizeof(vpd); i += 4) {
650 ret = t3_seeprom_read(adapter, addr + i,
651 (u32 *)((u8 *)&vpd + i));
656 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
657 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
658 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
659 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
660 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
662 /* Old eeproms didn't have port information */
663 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
664 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
665 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
667 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
668 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
669 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
670 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
671 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
672 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
675 for (i = 0; i < 6; i++)
676 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
677 hex2int(vpd.na_data[2 * i + 1]);
681 /* serial flash and firmware constants */
683 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
684 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
685 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
687 /* flash command opcodes */
688 SF_PROG_PAGE = 2, /* program page */
689 SF_WR_DISABLE = 4, /* disable writes */
690 SF_RD_STATUS = 5, /* read status register */
691 SF_WR_ENABLE = 6, /* enable writes */
692 SF_RD_DATA_FAST = 0xb, /* read flash */
693 SF_ERASE_SECTOR = 0xd8, /* erase sector */
695 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
696 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
697 FW_MIN_SIZE = 8 /* at least version and csum */
701 * sf1_read - read data from the serial flash
702 * @adapter: the adapter
703 * @byte_cnt: number of bytes to read
704 * @cont: whether another operation will be chained
705 * @valp: where to store the read data
707 * Reads up to 4 bytes of data from the serial flash. The location of
708 * the read needs to be specified prior to calling this by issuing the
709 * appropriate commands to the serial flash.
711 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
716 if (!byte_cnt || byte_cnt > 4)
718 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
720 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
721 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
723 *valp = t3_read_reg(adapter, A_SF_DATA);
728 * sf1_write - write data to the serial flash
729 * @adapter: the adapter
730 * @byte_cnt: number of bytes to write
731 * @cont: whether another operation will be chained
732 * @val: value to write
734 * Writes up to 4 bytes of data to the serial flash. The location of
735 * the write needs to be specified prior to calling this by issuing the
736 * appropriate commands to the serial flash.
738 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
741 if (!byte_cnt || byte_cnt > 4)
743 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
745 t3_write_reg(adapter, A_SF_DATA, val);
746 t3_write_reg(adapter, A_SF_OP,
747 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
748 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
752 * flash_wait_op - wait for a flash operation to complete
753 * @adapter: the adapter
754 * @attempts: max number of polls of the status register
755 * @delay: delay between polls in ms
757 * Wait for a flash operation to complete by polling the status register.
759 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
765 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
766 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
778 * t3_read_flash - read words from serial flash
779 * @adapter: the adapter
780 * @addr: the start address for the read
781 * @nwords: how many 32-bit words to read
782 * @data: where to store the read data
783 * @byte_oriented: whether to store data as bytes or as words
785 * Read the specified number of 32-bit words from the serial flash.
786 * If @byte_oriented is set the read data is stored as a byte array
787 * (i.e., big-endian), otherwise as 32-bit words in the platform's
790 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
791 u32 *data, int byte_oriented)
795 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
798 addr = swab32(addr) | SF_RD_DATA_FAST;
800 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
801 (ret = sf1_read(adapter, 1, 1, data)) != 0)
804 for ( ; nwords; nwords--, data++) {
805 ret = sf1_read(adapter, 4, nwords > 1, data);
809 *data = htonl(*data);
815 * t3_write_flash - write up to a page of data to the serial flash
816 * @adapter: the adapter
817 * @addr: the start address to write
818 * @n: length of data to write
819 * @data: the data to write
821 * Writes up to a page of data (256 bytes) to the serial flash starting
822 * at the given address.
824 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
825 unsigned int n, const u8 *data)
829 unsigned int i, c, left, val, offset = addr & 0xff;
831 if (addr + n > SF_SIZE || offset + n > 256)
834 val = swab32(addr) | SF_PROG_PAGE;
836 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
837 (ret = sf1_write(adapter, 4, 1, val)) != 0)
840 for (left = n; left; left -= c) {
842 for (val = 0, i = 0; i < c; ++i)
843 val = (val << 8) + *data++;
845 ret = sf1_write(adapter, c, c != left, val);
849 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
852 /* Read the page to verify the write succeeded */
853 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
857 if (memcmp(data - n, (u8 *)buf + offset, n))
863 * t3_check_tpsram_version - read the tp sram version
864 * @adapter: the adapter
866 * Reads the protocol sram version from serial eeprom.
868 int t3_check_tpsram_version(adapter_t *adapter)
872 unsigned int major, minor;
874 /* Get version loaded in SRAM */
875 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
876 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
881 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
883 major = G_TP_VERSION_MAJOR(vers);
884 minor = G_TP_VERSION_MINOR(vers);
886 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
893 * t3_check_tpsram - check if provided protocol SRAM
894 * is compatible with this driver
895 * @adapter: the adapter
896 * @tp_sram: the firmware image to write
899 * Checks if an adapter's tp sram is compatible with the driver.
900 * Returns 0 if the versions are compatible, a negative error otherwise.
902 int t3_check_tpsram(adapter_t *adapter, u8 *tp_sram, unsigned int size)
906 const u32 *p = (const u32 *)tp_sram;
908 /* Verify checksum */
909 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
911 if (csum != 0xffffffff) {
912 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
920 enum fw_version_type {
926 * t3_get_fw_version - read the firmware version
927 * @adapter: the adapter
928 * @vers: where to place the version
930 * Reads the FW version from flash.
932 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
934 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
938 * t3_check_fw_version - check if the FW is compatible with this driver
939 * @adapter: the adapter
941 * Checks if an adapter's FW is compatible with the driver. Returns 0
942 * if the versions are compatible, a negative error otherwise.
944 int t3_check_fw_version(adapter_t *adapter)
948 unsigned int type, major, minor;
950 ret = t3_get_fw_version(adapter, &vers);
954 type = G_FW_VERSION_TYPE(vers);
955 major = G_FW_VERSION_MAJOR(vers);
956 minor = G_FW_VERSION_MINOR(vers);
958 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
959 minor == FW_VERSION_MINOR)
962 CH_ERR(adapter, "found wrong FW version (%u.%u), "
963 "driver needs version %d.%d\n", major, minor,
964 FW_VERSION_MAJOR, FW_VERSION_MINOR);
969 * t3_flash_erase_sectors - erase a range of flash sectors
970 * @adapter: the adapter
971 * @start: the first sector to erase
972 * @end: the last sector to erase
974 * Erases the sectors in the given range.
976 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
978 while (start <= end) {
981 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
982 (ret = sf1_write(adapter, 4, 0,
983 SF_ERASE_SECTOR | (start << 8))) != 0 ||
984 (ret = flash_wait_op(adapter, 5, 500)) != 0)
992 * t3_load_fw - download firmware
993 * @adapter: the adapter
994 * @fw_data: the firmware image to write
997 * Write the supplied firmware image to the card's serial flash.
998 * The FW image has the following sections: @size - 8 bytes of code and
999 * data, followed by 4 bytes of FW version, followed by the 32-bit
1000 * 1's complement checksum of the whole image.
1002 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1006 const u32 *p = (const u32 *)fw_data;
1007 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1009 if ((size & 3) || size < FW_MIN_SIZE)
1011 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1014 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1015 csum += ntohl(p[i]);
1016 if (csum != 0xffffffff) {
1017 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1022 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1026 size -= 8; /* trim off version and checksum */
1027 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1028 unsigned int chunk_size = min(size, 256U);
1030 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1035 fw_data += chunk_size;
1039 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1042 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1046 #define CIM_CTL_BASE 0x2000
1049 * t3_cim_ctl_blk_read - read a block from CIM control region
1051 * @adap: the adapter
1052 * @addr: the start address within the CIM control region
1053 * @n: number of words to read
1054 * @valp: where to store the result
1056 * Reads a block of 4-byte words from the CIM control region.
1058 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1063 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1066 for ( ; !ret && n--; addr += 4) {
1067 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1068 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1071 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1077 * t3_link_changed - handle interface link changes
1078 * @adapter: the adapter
1079 * @port_id: the port index that changed link state
1081 * Called when a port's link settings change to propagate the new values
1082 * to the associated PHY and MAC. After performing the common tasks it
1083 * invokes an OS-specific handler.
1085 void t3_link_changed(adapter_t *adapter, int port_id)
1087 int link_ok, speed, duplex, fc;
1088 struct port_info *pi = adap2pinfo(adapter, port_id);
1089 struct cphy *phy = &pi->phy;
1090 struct cmac *mac = &pi->mac;
1091 struct link_config *lc = &pi->link_config;
1093 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1095 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1096 uses_xaui(adapter)) {
1099 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1100 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1102 lc->link_ok = (unsigned char)link_ok;
1103 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1104 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1105 if (lc->requested_fc & PAUSE_AUTONEG)
1106 fc &= lc->requested_fc;
1108 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1110 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1111 /* Set MAC speed, duplex, and flow control to match PHY. */
1112 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1113 lc->fc = (unsigned char)fc;
1116 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1120 * t3_link_start - apply link configuration to MAC/PHY
1121 * @phy: the PHY to setup
1122 * @mac: the MAC to setup
1123 * @lc: the requested link configuration
1125 * Set up a port's MAC and PHY according to a desired link configuration.
1126 * - If the PHY can auto-negotiate first decide what to advertise, then
1127 * enable/disable auto-negotiation as desired, and reset.
1128 * - If the PHY does not auto-negotiate just reset it.
1129 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1130 * otherwise do it later based on the outcome of auto-negotiation.
1132 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1134 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1137 if (lc->supported & SUPPORTED_Autoneg) {
1138 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1140 lc->advertising |= ADVERTISED_Asym_Pause;
1142 lc->advertising |= ADVERTISED_Pause;
1144 phy->ops->advertise(phy, lc->advertising);
1146 if (lc->autoneg == AUTONEG_DISABLE) {
1147 lc->speed = lc->requested_speed;
1148 lc->duplex = lc->requested_duplex;
1149 lc->fc = (unsigned char)fc;
1150 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1152 /* Also disables autoneg */
1153 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1154 phy->ops->reset(phy, 0);
1156 phy->ops->autoneg_enable(phy);
1158 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1159 lc->fc = (unsigned char)fc;
1160 phy->ops->reset(phy, 0);
1166 * t3_set_vlan_accel - control HW VLAN extraction
1167 * @adapter: the adapter
1168 * @ports: bitmap of adapter ports to operate on
1169 * @on: enable (1) or disable (0) HW VLAN extraction
1171 * Enables or disables HW extraction of VLAN tags for the given port.
1173 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1175 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1176 ports << S_VLANEXTRACTIONENABLE,
1177 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1181 unsigned int mask; /* bits to check in interrupt status */
1182 const char *msg; /* message to print or NULL */
1183 short stat_idx; /* stat counter to increment or -1 */
1184 unsigned short fatal:1; /* whether the condition reported is fatal */
1188 * t3_handle_intr_status - table driven interrupt handler
1189 * @adapter: the adapter that generated the interrupt
1190 * @reg: the interrupt status register to process
1191 * @mask: a mask to apply to the interrupt status
1192 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
1195 * A table driven interrupt handler that applies a set of masks to an
1196 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
1198 * optionally printing a warning or alert message, and optionally
1199 * incrementing a stat counter. The table is terminated by an entry
1200 * specifying mask 0. Returns the number of fatal interrupt conditions.
1202 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1204 const struct intr_info *acts,
1205 unsigned long *stats)
1208 unsigned int status = t3_read_reg(adapter, reg) & mask;
1210 for ( ; acts->mask; ++acts) {
1211 if (!(status & acts->mask)) continue;
1214 CH_ALERT(adapter, "%s (0x%x)\n",
1215 acts->msg, status & acts->mask);
1216 } else if (acts->msg)
1217 CH_WARN(adapter, "%s (0x%x)\n",
1218 acts->msg, status & acts->mask);
1219 if (acts->stat_idx >= 0)
1220 stats[acts->stat_idx]++;
1222 if (status) /* clear processed interrupts */
1223 t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt enable masks.  Bits commented out are known-noisy
 * or unsupported sources that are deliberately left disabled.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
/* ICSPI_FRM_ERR/OESPI_FRM_ERR and IESPI_FRM_ERR/OCSPI_FRM_ERR are
 * object-like macros defined further down; expansion happens at use. */
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1270 * Interrupt handler for the PCIX1 module.
1272 static void pci_intr_handler(adapter_t *adapter)
1274 static struct intr_info pcix1_intr_info[] = {
1275 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1276 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1277 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1278 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1279 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1280 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1281 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1282 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1283 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1285 { F_DETCORECCERR, "PCI correctable ECC error",
1286 STAT_PCI_CORR_ECC, 0 },
1287 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1288 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1289 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1291 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1293 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1295 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1300 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1301 pcix1_intr_info, adapter->irq_stats))
1302 t3_fatal_err(adapter);
1306 * Interrupt handler for the PCIE module.
1308 static void pcie_intr_handler(adapter_t *adapter)
1310 static struct intr_info pcie_intr_info[] = {
1311 { F_PEXERR, "PCI PEX error", -1, 1 },
1313 "PCI unexpected split completion DMA read error", -1, 1 },
1315 "PCI unexpected split completion DMA command error", -1, 1 },
1316 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1317 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1318 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1319 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1320 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1321 "PCI MSI-X table/PBA parity error", -1, 1 },
1322 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1326 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1327 pcie_intr_info, adapter->irq_stats))
1328 t3_fatal_err(adapter);
1332 * TP interrupt handler.
1334 static void tp_intr_handler(adapter_t *adapter)
1336 static struct intr_info tp_intr_info[] = {
1337 { 0xffffff, "TP parity error", -1, 1 },
1338 { 0x1000000, "TP out of Rx pages", -1, 1 },
1339 { 0x2000000, "TP out of Tx pages", -1, 1 },
1343 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1344 tp_intr_info, NULL))
1345 t3_fatal_err(adapter);
1349 * CIM interrupt handler.
1351 static void cim_intr_handler(adapter_t *adapter)
1353 static struct intr_info cim_intr_info[] = {
1354 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1355 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1356 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1357 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1358 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1359 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1360 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1361 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1362 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1363 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1364 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1365 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1369 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1370 cim_intr_info, NULL))
1371 t3_fatal_err(adapter);
1375 * ULP RX interrupt handler.
1377 static void ulprx_intr_handler(adapter_t *adapter)
1379 static struct intr_info ulprx_intr_info[] = {
1380 { F_PARERR, "ULP RX parity error", -1, 1 },
1384 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1385 ulprx_intr_info, NULL))
1386 t3_fatal_err(adapter);
1390 * ULP TX interrupt handler.
1392 static void ulptx_intr_handler(adapter_t *adapter)
1394 static struct intr_info ulptx_intr_info[] = {
1395 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1396 STAT_ULP_CH0_PBL_OOB, 0 },
1397 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1398 STAT_ULP_CH1_PBL_OOB, 0 },
1402 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1403 ulptx_intr_info, adapter->irq_stats))
1404 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM TX serial-packet interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417 * PM TX interrupt handler.
1419 static void pmtx_intr_handler(adapter_t *adapter)
1421 static struct intr_info pmtx_intr_info[] = {
1422 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1423 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1424 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1425 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1426 "PMTX ispi parity error", -1, 1 },
1427 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1428 "PMTX ospi parity error", -1, 1 },
1432 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1433 pmtx_intr_info, NULL))
1434 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM RX serial-packet interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1447 * PM RX interrupt handler.
1449 static void pmrx_intr_handler(adapter_t *adapter)
1451 static struct intr_info pmrx_intr_info[] = {
1452 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1453 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1454 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1455 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1456 "PMRX ispi parity error", -1, 1 },
1457 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1458 "PMRX ospi parity error", -1, 1 },
1462 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1463 pmrx_intr_info, NULL))
1464 t3_fatal_err(adapter);
1468 * CPL switch interrupt handler.
1470 static void cplsw_intr_handler(adapter_t *adapter)
1472 static struct intr_info cplsw_intr_info[] = {
1473 // { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1474 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1475 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1476 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1477 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1481 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1482 cplsw_intr_info, NULL))
1483 t3_fatal_err(adapter);
1487 * MPS interrupt handler.
1489 static void mps_intr_handler(adapter_t *adapter)
1491 static struct intr_info mps_intr_info[] = {
1492 { 0x1ff, "MPS parity error", -1, 1 },
1496 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1497 mps_intr_info, NULL))
1498 t3_fatal_err(adapter);
1501 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1504 * MC7 interrupt handler.
1506 static void mc7_intr_handler(struct mc7 *mc7)
1508 adapter_t *adapter = mc7->adapter;
1509 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1512 mc7->stats.corr_err++;
1513 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1514 "data 0x%x 0x%x 0x%x\n", mc7->name,
1515 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1516 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1517 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1518 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1522 mc7->stats.uncorr_err++;
1523 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1524 "data 0x%x 0x%x 0x%x\n", mc7->name,
1525 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1526 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1527 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1528 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1532 mc7->stats.parity_err++;
1533 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1534 mc7->name, G_PE(cause));
1540 if (adapter->params.rev > 0)
1541 addr = t3_read_reg(adapter,
1542 mc7->offset + A_MC7_ERR_ADDR);
1543 mc7->stats.addr_err++;
1544 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1548 if (cause & MC7_INTR_FATAL)
1549 t3_fatal_err(adapter);
1551 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1554 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1555 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1557 * XGMAC interrupt handler.
1559 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1564 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1565 mac = &adap2pinfo(adap, idx)->mac;
1566 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1568 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1569 mac->stats.tx_fifo_parity_err++;
1570 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1572 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1573 mac->stats.rx_fifo_parity_err++;
1574 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1576 if (cause & F_TXFIFO_UNDERRUN)
1577 mac->stats.tx_fifo_urun++;
1578 if (cause & F_RXFIFO_OVERFLOW)
1579 mac->stats.rx_fifo_ovfl++;
1580 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1581 mac->stats.serdes_signal_loss++;
1582 if (cause & F_XAUIPCSCTCERR)
1583 mac->stats.xaui_pcs_ctc_err++;
1584 if (cause & F_XAUIPCSALIGNCHANGE)
1585 mac->stats.xaui_pcs_align_change++;
1587 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1588 if (cause & XGM_INTR_FATAL)
1594 * Interrupt handler for PHY events.
1596 int t3_phy_intr_handler(adapter_t *adapter)
1598 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1599 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1601 for_each_port(adapter, i) {
1602 struct port_info *p = adap2pinfo(adapter, i);
1604 mask = gpi - (gpi & (gpi - 1));
1607 if (!(p->port_type->caps & SUPPORTED_IRQ))
1611 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1613 if (phy_cause & cphy_cause_link_change)
1614 t3_link_changed(adapter, i);
1615 if (phy_cause & cphy_cause_fifo_error)
1616 p->phy.fifo_errors++;
1620 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1625 * T3 slow path (non-data) interrupt handler.
1627 int t3_slow_intr_handler(adapter_t *adapter)
1629 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1631 cause &= adapter->slow_intr_mask;
1634 if (cause & F_PCIM0) {
1635 if (is_pcie(adapter))
1636 pcie_intr_handler(adapter);
1638 pci_intr_handler(adapter);
1641 t3_sge_err_intr_handler(adapter);
1642 if (cause & F_MC7_PMRX)
1643 mc7_intr_handler(&adapter->pmrx);
1644 if (cause & F_MC7_PMTX)
1645 mc7_intr_handler(&adapter->pmtx);
1646 if (cause & F_MC7_CM)
1647 mc7_intr_handler(&adapter->cm);
1649 cim_intr_handler(adapter);
1651 tp_intr_handler(adapter);
1652 if (cause & F_ULP2_RX)
1653 ulprx_intr_handler(adapter);
1654 if (cause & F_ULP2_TX)
1655 ulptx_intr_handler(adapter);
1656 if (cause & F_PM1_RX)
1657 pmrx_intr_handler(adapter);
1658 if (cause & F_PM1_TX)
1659 pmtx_intr_handler(adapter);
1660 if (cause & F_CPL_SWITCH)
1661 cplsw_intr_handler(adapter);
1663 mps_intr_handler(adapter);
1665 t3_mc5_intr_handler(&adapter->mc5);
1666 if (cause & F_XGMAC0_0)
1667 mac_intr_handler(adapter, 0);
1668 if (cause & F_XGMAC0_1)
1669 mac_intr_handler(adapter, 1);
1670 if (cause & F_T3DBG)
1671 t3_os_ext_intr_handler(adapter);
1673 /* Clear the interrupts just processed. */
1674 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1675 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1680 * t3_intr_enable - enable interrupts
1681 * @adapter: the adapter whose interrupts should be enabled
1683 * Enable interrupts by setting the interrupt enable registers of the
1684 * various HW modules and then enabling the top-level interrupt
1687 void t3_intr_enable(adapter_t *adapter)
1689 static struct addr_val_pair intr_en_avp[] = {
1690 { A_SG_INT_ENABLE, SGE_INTR_MASK },
1691 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1692 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1694 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1696 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1697 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1698 { A_TP_INT_ENABLE, 0x3bfffff },
1699 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1700 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1701 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1702 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1705 adapter->slow_intr_mask = PL_INTR_MASK;
1707 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1709 if (adapter->params.rev > 0) {
1710 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1711 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1712 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1713 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1714 F_PBL_BOUND_ERR_CH1);
1716 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1717 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1720 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1721 adapter_info(adapter)->gpio_intr);
1722 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1723 adapter_info(adapter)->gpio_intr);
1724 if (is_pcie(adapter))
1725 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1727 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1728 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1729 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1733 * t3_intr_disable - disable a card's interrupts
1734 * @adapter: the adapter whose interrupts should be disabled
1736 * Disable interrupts. We only disable the top-level interrupt
1737 * concentrator and the SGE data interrupts.
1739 void t3_intr_disable(adapter_t *adapter)
1741 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1742 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1743 adapter->slow_intr_mask = 0;
1747 * t3_intr_clear - clear all interrupts
1748 * @adapter: the adapter whose interrupts should be cleared
1750 * Clears all interrupts.
1752 void t3_intr_clear(adapter_t *adapter)
1754 static const unsigned int cause_reg_addr[] = {
1756 A_SG_RSPQ_FL_STATUS,
1759 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1760 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1761 A_CIM_HOST_INT_CAUSE,
1774 /* Clear PHY and MAC interrupts for each port. */
1775 for_each_port(adapter, i)
1776 t3_port_intr_clear(adapter, i);
1778 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1779 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1781 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1782 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1786 * t3_port_intr_enable - enable port-specific interrupts
1787 * @adapter: associated adapter
1788 * @idx: index of port whose interrupts should be enabled
1790 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1793 void t3_port_intr_enable(adapter_t *adapter, int idx)
1795 struct port_info *pi = adap2pinfo(adapter, idx);
1797 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
1798 pi->phy.ops->intr_enable(&pi->phy);
1802 * t3_port_intr_disable - disable port-specific interrupts
1803 * @adapter: associated adapter
1804 * @idx: index of port whose interrupts should be disabled
1806 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1809 void t3_port_intr_disable(adapter_t *adapter, int idx)
1811 struct port_info *pi = adap2pinfo(adapter, idx);
1813 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
1814 pi->phy.ops->intr_disable(&pi->phy);
1818 * t3_port_intr_clear - clear port-specific interrupts
1819 * @adapter: associated adapter
1820 * @idx: index of port whose interrupts to clear
1822 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1825 void t3_port_intr_clear(adapter_t *adapter, int idx)
1827 struct port_info *pi = adap2pinfo(adapter, idx);
1829 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
1830 pi->phy.ops->intr_clear(&pi->phy);
1834 * t3_sge_write_context - write an SGE context
1835 * @adapter: the adapter
1836 * @id: the context id
1837 * @type: the context type
1839 * Program an SGE context with the values already loaded in the
1840 * CONTEXT_DATA? registers.
1842 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
1845 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1846 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1847 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1848 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1849 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1850 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1851 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1856 * t3_sge_init_ecntxt - initialize an SGE egress context
1857 * @adapter: the adapter to configure
1858 * @id: the context id
1859 * @gts_enable: whether to enable GTS for the context
1860 * @type: the egress context type
1861 * @respq: associated response queue
1862 * @base_addr: base address of queue
1863 * @size: number of queue entries
1865 * @gen: initial generation value for the context
1866 * @cidx: consumer pointer
1868 * Initialize an SGE egress context and make it ready for use. If the
1869 * platform allows concurrent context operations, the caller is
1870 * responsible for appropriate locking.
1872 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1873 enum sge_context_type type, int respq, u64 base_addr,
1874 unsigned int size, unsigned int token, int gen,
1877 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1879 if (base_addr & 0xfff) /* must be 4K aligned */
1881 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1885 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1886 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1887 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1888 V_EC_BASE_LO((u32)base_addr & 0xffff));
1890 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
1892 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1893 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
1894 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1896 return t3_sge_write_context(adapter, id, F_EGRESS);
1900 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1901 * @adapter: the adapter to configure
1902 * @id: the context id
1903 * @gts_enable: whether to enable GTS for the context
1904 * @base_addr: base address of queue
1905 * @size: number of queue entries
1906 * @bsize: size of each buffer for this queue
1907 * @cong_thres: threshold to signal congestion to upstream producers
1908 * @gen: initial generation value for the context
1909 * @cidx: consumer pointer
1911 * Initialize an SGE free list context and make it ready for use. The
1912 * caller is responsible for ensuring only one context operation occurs
1915 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1916 u64 base_addr, unsigned int size, unsigned int bsize,
1917 unsigned int cong_thres, int gen, unsigned int cidx)
1919 if (base_addr & 0xfff) /* must be 4K aligned */
1921 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1925 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1928 V_FL_BASE_HI((u32)base_addr) |
1929 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1930 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1931 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1932 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1933 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1934 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1935 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1936 return t3_sge_write_context(adapter, id, F_FREELIST);
1940 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1941 * @adapter: the adapter to configure
1942 * @id: the context id
1943 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1944 * @base_addr: base address of queue
1945 * @size: number of queue entries
1946 * @fl_thres: threshold for selecting the normal or jumbo free list
1947 * @gen: initial generation value for the context
1948 * @cidx: consumer pointer
1950 * Initialize an SGE response queue context and make it ready for use.
1951 * The caller is responsible for ensuring only one context operation
1954 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
1955 u64 base_addr, unsigned int size,
1956 unsigned int fl_thres, int gen, unsigned int cidx)
1958 unsigned int intr = 0;
1960 if (base_addr & 0xfff) /* must be 4K aligned */
1962 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1966 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1968 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
1970 if (irq_vec_idx >= 0)
1971 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1972 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1973 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
1974 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1975 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1979 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1980 * @adapter: the adapter to configure
1981 * @id: the context id
1982 * @base_addr: base address of queue
1983 * @size: number of queue entries
1984 * @rspq: response queue for async notifications
1985 * @ovfl_mode: CQ overflow mode
1986 * @credits: completion queue credits
1987 * @credit_thres: the credit threshold
1989 * Initialize an SGE completion queue context and make it ready for use.
1990 * The caller is responsible for ensuring only one context operation
1993 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
1994 unsigned int size, int rspq, int ovfl_mode,
1995 unsigned int credits, unsigned int credit_thres)
1997 if (base_addr & 0xfff) /* must be 4K aligned */
1999 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2004 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2006 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2007 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2008 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
2009 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2010 V_CQ_CREDIT_THRES(credit_thres));
2011 return t3_sge_write_context(adapter, id, F_CQ);
2015 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2016 * @adapter: the adapter
2017 * @id: the egress context id
2018 * @enable: enable (1) or disable (0) the context
2020 * Enable or disable an SGE egress context. The caller is responsible for
2021 * ensuring only one context operation occurs at a time.
2023 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2025 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2028 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2029 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2030 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2031 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2032 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2033 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2034 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2035 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2040 * t3_sge_disable_fl - disable an SGE free-buffer list
2041 * @adapter: the adapter
2042 * @id: the free list context id
2044 * Disable an SGE free-buffer list. The caller is responsible for
2045 * ensuring only one context operation occurs at a time.
2047 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2052 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2053 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2054 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2055 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2056 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2057 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2058 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2059 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2064 * t3_sge_disable_rspcntxt - disable an SGE response queue
2065 * @adapter: the adapter
2066 * @id: the response queue context id
2068 * Disable an SGE response queue. The caller is responsible for
2069 * ensuring only one context operation occurs at a time.
2071 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2073 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2076 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2077 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2078 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2079 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2080 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2081 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2082 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2083 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2088 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2089 * @adapter: the adapter
2090 * @id: the completion queue context id
2092 * Disable an SGE completion queue. The caller is responsible for
2093 * ensuring only one context operation occurs at a time.
2095 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2097 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2100 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2101 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2102 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2103 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2104 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2105 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2106 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2107 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2112 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2113 * @adapter: the adapter
2114 * @id: the context id
2115 * @op: the operation to perform
2117 * Perform the selected operation on an SGE completion queue context.
2118 * The caller is responsible for ensuring only one context operation
2121 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2122 unsigned int credits)
2126 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2129 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2130 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2131 V_CONTEXT(id) | F_CQ);
2132 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2136 if (op >= 2 && op < 7) {
2137 if (adapter->params.rev > 0)
2138 return G_CQ_INDEX(val);
2140 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2141 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2142 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2143 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2145 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2151 * t3_sge_read_context - read an SGE context
2152 * @type: the context type
2153 * @adapter: the adapter
2154 * @id: the context id
2155 * @data: holds the retrieved context
2157 * Read an SGE egress context. The caller is responsible for ensuring
2158 * only one context operation occurs at a time.
2160 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2161 unsigned int id, u32 data[4])
/* Refuse to start a new context operation while a previous one is pending. */
2163 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 is the context-read command; @type selects the context table. */
2166 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2167 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2168 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
/* On completion the HW latches the context into the four DATA registers. */
2171 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2172 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2173 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2174 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2179 * t3_sge_read_ecntxt - read an SGE egress context
2180 * @adapter: the adapter
2181 * @id: the context id
2182 * @data: holds the retrieved context
2184 * Read an SGE egress context. The caller is responsible for ensuring
2185 * only one context operation occurs at a time.
2187 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2191 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2195 * t3_sge_read_cq - read an SGE CQ context
2196 * @adapter: the adapter
2197 * @id: the context id
2198 * @data: holds the retrieved context
2200 * Read an SGE CQ context. The caller is responsible for ensuring
2201 * only one context operation occurs at a time.
2203 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2207 return t3_sge_read_context(F_CQ, adapter, id, data);
2211 * t3_sge_read_fl - read an SGE free-list context
2212 * @adapter: the adapter
2213 * @id: the context id
2214 * @data: holds the retrieved context
2216 * Read an SGE free-list context. The caller is responsible for ensuring
2217 * only one context operation occurs at a time.
2219 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the doubled bound. */
2221 if (id >= SGE_QSETS * 2)
2223 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2227 * t3_sge_read_rspq - read an SGE response queue context
2228 * @adapter: the adapter
2229 * @id: the context id
2230 * @data: holds the retrieved context
2232 * Read an SGE response queue context. The caller is responsible for
2233 * ensuring only one context operation occurs at a time.
2235 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2237 if (id >= SGE_QSETS)
2239 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2243 * t3_config_rss - configure Rx packet steering
2244 * @adapter: the adapter
2245 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2246 * @cpus: values for the CPU lookup table (0xff terminated)
2247 * @rspq: values for the response queue lookup table (0xffff terminated)
2249 * Programs the receive packet steering logic. @cpus and @rspq provide
2250 * the values for the CPU and response queue lookup tables. If they
2251 * provide fewer values than the size of the tables the supplied values
2252 * are used repeatedly until the tables are fully populated.
2254 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2257 int i, j, cpu_idx = 0, q_idx = 0;
/* Fill the CPU lookup table, packing two 6-bit CPU values per entry. */
2260 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2263 for (j = 0; j < 2; ++j) {
2264 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* 0xff terminates the caller's list; the index presumably wraps back
 * to 0 in the elided branch so the values repeat — TODO confirm. */
2265 if (cpus[cpu_idx] == 0xff)
2268 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Fill the response-queue map; entry index is carried in bits 16+. */
2272 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2273 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2274 (i << 16) | rspq[q_idx++]);
2275 if (rspq[q_idx] == 0xffff)
/* Enable/commit the supplied RSS mode last. */
2279 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2283 * t3_read_rss - read the contents of the RSS tables
2284 * @adapter: the adapter
2285 * @lkup: holds the contents of the RSS lookup table
2286 * @map: holds the contents of the RSS map table
2288 * Reads the contents of the receive packet steering tables.
2290 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2296 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2297 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
/* Bit 31 reads back as a valid/ready flag; bail out if it is clear. */
2299 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2300 if (!(val & 0x80000000))
2303 *lkup++ = (u8)(val >> 8);
2307 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2308 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2310 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2311 if (!(val & 0x80000000))
2319 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2320 * @adap: the adapter
2321 * @enable: 1 to select offload mode, 0 for regular NIC
2323 * Switches TP to NIC/offload mode.
2325 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
/* Only offload-capable adapters may leave NIC mode; disabling is always OK. */
2327 if (is_offload(adap) || !enable)
2328 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2329 V_NICMODE(!enable))
2333 * pm_num_pages - calculate the number of pages of the payload memory
2334 * @mem_size: the size of the payload memory
2335 * @pg_size: the size of each payload memory page
2337 * Calculate the number of pages, each of the given size, that fit in a
2338 * memory of the specified size, respecting the HW requirement that the
2339 * number of pages must be a multiple of 24.
2341 static inline unsigned int pm_num_pages(unsigned int mem_size,
2342 unsigned int pg_size)
/* The elided tail presumably rounds n down to a multiple of 24. */
2344 unsigned int n = mem_size / pg_size;
/*
 * Program the base address of an on-card memory region and advance the
 * running cursor.  NOTE(review): the elided macro continuation presumably
 * bumps `start` past `size` — confirm against the full source.
 */
2349 #define mem_region(adap, start, size, reg) \
2350 t3_write_reg((adap), A_ ## reg, (start)); \
2354 * partition_mem - partition memory and configure TP memory settings
2355 * @adap: the adapter
2356 * @p: the TP parameters
2358 * Partitions context and payload memory and configures TP's memory
2361 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2363 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2364 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0 parts scale the timer region with the number of TIDs. */
2366 if (adap->params.rev > 0) {
2367 if (tids <= 16 * 1024) {
2370 } else if (tids <= 64 * 1024) {
2373 } else if (tids <= 256 * 1024) {
/* Split payload memory between the Rx and Tx channels. */
2379 t3_write_reg(adap, A_TP_PMM_SIZE,
2380 p->chan_rx_size | (p->chan_tx_size >> 16));
2382 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2383 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2384 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
/* fls(pg_size) - 12: page size expressed as a power of two above 4KB. */
2385 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2386 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2388 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2389 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2390 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2392 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2393 /* Add a bit of headroom and make multiple of 24 */
2395 pstructs -= pstructs % 24;
2396 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Context memory layout: TCBs first, then SGE contexts, timers, pstructs. */
2398 m = tids * TCB_SIZE;
2399 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2400 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2401 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2402 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2403 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2404 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2405 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2406 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4KB boundary for the CIM SDRAM window. */
2408 m = (m + 4095) & ~0xfff;
2409 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2410 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Recompute how many TIDs the remaining memory supports and give any
 * surplus MC5 entries to the server region. */
2412 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2413 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2414 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2416 adap->params.mc5.nservers += m - tids;
/* Write @val to the indirect TP register @addr via the PIO window. */
2419 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2421 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2422 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * Read-modify-write an indirect TP register: clear the bits in @mask and
 * set those in @val, leaving the rest untouched.
 */
2425 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2426 unsigned int mask, unsigned int val)
2428 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2429 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2430 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * One-time static configuration of the TP (TCP protocol) block: checksum
 * offloads, TCP option handling, delayed-ACK policy, pacing and per-channel
 * QoS plumbing.  Values here are fixed policy, not per-port state.
 */
2433 static void tp_config(adapter_t *adap, const struct tp_params *p)
2435 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2436 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2437 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2438 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2439 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2440 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2441 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2442 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2443 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2444 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2445 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2446 F_IPV6ENABLE | F_NICMODE);
2447 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2448 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
/* ESND (early send) is only usable on rev > 0 silicon. */
2449 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2450 adap->params.rev > 0 ? F_ENABLEESND :
2452 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2454 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2455 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2456 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
/* Two successive writes: the HW appears to take a high/low threshold pair
 * through the same register — NOTE(review): confirm intent. */
2457 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2458 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Pacing policy differs by silicon rev: auto-pacing on rev > 0, fixed
 * pacing on rev 0. */
2460 if (adap->params.rev > 0) {
2461 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2462 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2464 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2465 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2467 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2469 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2470 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2471 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2472 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
/* Extra steering setup for 4-port adapters: map ports via DA/address bits. */
2474 if (adap->params.nports > 2) {
2475 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2476 F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
2477 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2478 V_RXMAPMODE(M_RXMAPMODE), 0);
2479 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2480 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2481 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2482 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2483 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2484 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2485 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
2489 /* TCP timer values in ms */
2490 #define TP_DACK_TIMER 50
2491 #define TP_RTO_MIN 250
2494 * tp_set_timers - set TP timing parameters
2495 * @adap: the adapter to set
2496 * @core_clk: the core clock frequency in Hz
2498 * Set TP's timing parameters, such as the various timer resolutions and
2499 * the TCP timer values.
2501 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2503 unsigned int tre = adap->params.tp.tre;
2504 unsigned int dack_re = adap->params.tp.dack_re;
2505 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps = timer ticks per second at the chosen timer resolution. */
2506 unsigned int tps = core_clk >> tre;
2508 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2509 V_DELAYEDACKRESOLUTION(dack_re) |
2510 V_TIMESTAMPRESOLUTION(tstamp_re));
/* Delayed-ACK timer, converted from TP_DACK_TIMER ms to dack_re ticks. */
2511 t3_write_reg(adap, A_TP_DACK_TIMER,
2512 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff table: shift amounts 0..15 packed 4 per register. */
2513 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2514 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2515 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2516 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2517 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2518 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2519 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* Lets "2 SECONDS" below read naturally as "2 * tps" timer ticks. */
2522 #define SECONDS * tps
2524 t3_write_reg(adap, A_TP_MSL,
2525 adap->params.rev > 0 ? 0 : 2 SECONDS);
2526 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2527 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2528 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2529 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2530 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2531 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2532 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2533 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2538 #ifdef CONFIG_CHELSIO_T3_CORE
2540 * t3_tp_set_coalescing_size - set receive coalescing size
2541 * @adap: the adapter
2542 * @size: the receive coalescing size
2543 * @psh: whether a set PSH bit should deliver coalesced data
2545 * Set the receive coalescing size and PSH bit handling.
2547 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2551 if (size > MAX_RX_COALESCING_LEN)
/* Start from the current value, clearing only the coalescing controls. */
2554 val = t3_read_reg(adap, A_TP_PARA_REG3);
2555 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2558 val |= F_RXCOALESCEENABLE;
2560 val |= F_RXCOALESCEPSHEN;
2561 size = min(MAX_RX_COALESCING_LEN, size);
2562 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2563 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2565 t3_write_reg(adap, A_TP_PARA_REG3, val);
2570 * t3_tp_set_max_rxsize - set the max receive size
2571 * @adap: the adapter
2572 * @size: the max receive size
2574 * Set TP's max receive size. This is the limit that applies when
2575 * receive coalescing is disabled.
2577 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
/* Same limit applied to both PM transfer-length fields. */
2579 t3_write_reg(adap, A_TP_PARA_REG7,
2580 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table (values elided here; see header comment). */
2583 static void __devinit init_mtus(unsigned short mtus[])
2586 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2587 * it can accomodate max size TCP/IP headers when SACK and timestamps
2588 * are enabled and still have at least 8 bytes of payload.
/*
 * Initial congestion control parameters.
 * a[] holds additive-increase alphas, b[] the backoff betas, indexed by
 * congestion window bucket; both tables are mostly filled in runs.
 */
2609 * Initial congestion control parameters.
2611 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2613 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2638 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2641 b[13] = b[14] = b[15] = b[16] = 3;
2642 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2643 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2648 /* The minimum additive increment value for the congestion control table */
2649 #define CC_MIN_INCR 2U
2652 * t3_load_mtus - write the MTU and congestion control HW tables
2653 * @adap: the adapter
2654 * @mtus: the unrestricted values for the MTU table
2655 * @alphs: the values for the congestion control alpha parameter
2656 * @beta: the values for the congestion control beta parameter
2657 * @mtu_cap: the maximum permitted effective MTU
2659 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2660 * Update the high-speed congestion control table with the supplied alpha,
2663 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2664 unsigned short alpha[NCCTRL_WIN],
2665 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Expected packets per congestion window, one entry per window bucket. */
2667 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2668 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2669 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2670 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2674 for (i = 0; i < NMTUS; ++i) {
2675 unsigned int mtu = min(mtus[i], mtu_cap);
2676 unsigned int log2 = fls(mtu);
/* Round log2 up when the MTU is closer to the next power of two. */
2678 if (!(mtu & ((1 << log2) >> 2))) /* round */
2680 t3_write_reg(adap, A_TP_MTU_TABLE,
2681 (i << 24) | (log2 << 16) | mtu);
/* One congestion-control entry per (MTU, window) pair; the increment
 * is derived from alpha scaled by payload per average packet and is
 * floored (in the elided max arm) at CC_MIN_INCR. */
2683 for (w = 0; w < NCCTRL_WIN; ++w) {
2686 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2689 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2690 (w << 16) | (beta[w] << 13) | inc);
2696 * t3_read_hw_mtus - returns the values in the HW MTU table
2697 * @adap: the adapter
2698 * @mtus: where to store the HW MTU values
2700 * Reads the HW MTU table.
2702 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2706 for (i = 0; i < NMTUS; ++i) {
/* 0xff in the index byte selects read-back mode for entry i. */
2709 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2710 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2711 mtus[i] = val & 0x3fff;
2716 * t3_get_cong_cntl_tab - reads the congestion control table
2717 * @adap: the adapter
2718 * @incr: where to store the alpha values
2720 * Reads the additive increments programmed into the HW congestion
2723 void t3_get_cong_cntl_tab(adapter_t *adap,
2724 unsigned short incr[NMTUS][NCCTRL_WIN])
2726 unsigned int mtu, w;
2728 for (mtu = 0; mtu < NMTUS; ++mtu)
2729 for (w = 0; w < NCCTRL_WIN; ++w) {
/* 0xffff0000 selects read-back; entry = (mtu, window) pair. */
2730 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2731 0xffff0000 | (mtu << 5) | w);
2732 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2733 A_TP_CCTRL_TABLE) & 0x1fff;
2738 * t3_tp_get_mib_stats - read TP's MIB counters
2739 * @adap: the adapter
2740 * @tps: holds the returned counter values
2742 * Returns the values of TP's MIB counters.
2744 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
/* Bulk indirect read: fills the whole stats struct as an array of u32s. */
2746 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
2747 sizeof(*tps) / sizeof(u32), 0);
2751 * t3_read_pace_tbl - read the pace table
2752 * @adap: the adapter
2753 * @pace_vals: holds the returned values
2755 * Returns the values of TP's pace table in nanoseconds.
2757 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
/* tick_ns: duration of one delayed-ACK timer tick in nanoseconds. */
2759 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
2761 for (i = 0; i < NTX_SCHED; i++) {
/* 0xffff0000 in the upper half selects read-back of entry i. */
2762 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2763 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
2768 * t3_set_pace_tbl - set the pace table
2769 * @adap: the adapter
2770 * @pace_vals: the pace values in nanoseconds
2771 * @start: index of the first entry in the HW pace table to set
2772 * @n: how many entries to set
2774 * Sets (a subset of the) HW pace table.
2776 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
2777 unsigned int start, unsigned int n)
2779 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
/* Convert ns to ticks with rounding (+ tick_ns / 2). */
2781 for ( ; n; n--, start++, pace_vals++)
2782 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
2783 ((*pace_vals + tick_ns / 2) / tick_ns));
/*
 * Program the lower/upper limits of a ULP RX memory region and (in the
 * elided continuation) presumably advance `start` past `len` — confirm
 * against the full macro.
 */
2786 #define ulp_region(adap, name, start, len) \
2787 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2788 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2789 (start) + (len) - 1); \
/* Same as ulp_region but for the ULP TX register block. */
2792 #define ulptx_region(adap, name, start, len) \
2793 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2794 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2795 (start) + (len) - 1)
/*
 * Carve the Rx payload memory above chan_rx_size into ULP regions:
 * iSCSI and TDDP get 1/8 each, the RDMA regions (TPT/STAG/RQ/PBL) 1/4
 * each.  PBL is programmed in both ULP TX and ULP RX.
 */
2797 static void ulp_config(adapter_t *adap, const struct tp_params *p)
2799 unsigned int m = p->chan_rx_size;
2801 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2802 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2803 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2804 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2805 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2806 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2807 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
/* All-ones tag mask: every TDDP tag bit participates in matching. */
2808 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2813 * t3_set_proto_sram - set the contents of the protocol sram
2814 * @adapter: the adapter
2815 * @data: the protocol image
2817 * Write the contents of the protocol SRAM.
2819 int t3_set_proto_sram(adapter_t *adap, u8 *data)
/* The image is consumed as big-endian 32-bit words, five per SRAM line. */
2822 u32 *buf = (u32 *)data;
2824 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2825 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, htobe32(*buf++));
2826 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, htobe32(*buf++));
2827 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, htobe32(*buf++));
2828 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, htobe32(*buf++));
2829 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, htobe32(*buf++));
/* Writing FIELD0 (line index | go bit 31) commits the line; then poll
 * the busy bit until the write completes. */
2831 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2832 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
/*
 * Program one of the two TP trace filters.  The 4-tuple (plus protocol,
 * VLAN and interface) and its mask are packed into four 32-bit key/mask
 * words and written through the indirect TP register window.
 * @filter_index selects the Rx (non-zero) or Tx (zero) filter bank;
 * @invert and @enable are folded into key[3] bits 29 and 28.
 */
2839 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
2840 int filter_index, int invert, int enable)
2842 u32 addr, key[4], mask[4];
2844 key[0] = tp->sport | (tp->sip << 16);
2845 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2847 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2849 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2850 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2851 mask[2] = tp->dip_mask;
2852 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2855 key[3] |= (1 << 29);
2857 key[3] |= (1 << 28);
/* Key/mask words live at consecutive indirect addresses. */
2859 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2860 tp_wr_indirect(adapter, addr++, key[0]);
2861 tp_wr_indirect(adapter, addr++, mask[0]);
2862 tp_wr_indirect(adapter, addr++, key[1]);
2863 tp_wr_indirect(adapter, addr++, mask[1]);
2864 tp_wr_indirect(adapter, addr++, key[2]);
2865 tp_wr_indirect(adapter, addr++, mask[2]);
2866 tp_wr_indirect(adapter, addr++, key[3]);
2867 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted indirect writes to the device. */
2868 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
2872 * t3_config_sched - configure a HW traffic scheduler
2873 * @adap: the adapter
2874 * @kbps: target rate in Kbps
2875 * @sched: the scheduler index
2877 * Configure a Tx HW scheduler for the target rate.
2879 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
2881 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2882 unsigned int clk = adap->params.vpd.cclk * 1000;
2883 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustively search (clocks-per-tick, bytes-per-tick) pairs for the
 * combination whose achieved rate is closest to the target. */
2886 kbps *= 125; /* -> bytes */
2887 for (cpt = 1; cpt <= 255; cpt++) {
2889 bpt = (kbps + tps / 2) / tps;
2890 if (bpt > 0 && bpt <= 255) {
2892 delta = v >= kbps ? v - kbps : kbps - v;
2893 if (delta <= mindelta) {
/* Once a candidate exists, a larger cpt can only do worse — stop. */
2898 } else if (selected_cpt)
/* Two schedulers share each rate-limit register (hence sched / 2);
 * the upper/lower 16-bit half is chosen by parity in the elided test. */
2904 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2905 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2906 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2908 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2910 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2911 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2916 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
2917 * @adap: the adapter
2918 * @sched: the scheduler index
2919 * @ipg: the interpacket delay in tenths of nanoseconds
2921 * Set the interpacket delay for a HW packet rate scheduler.
2923 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
2925 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
2927 /* convert ipg to nearest number of core clocks */
2928 ipg *= core_ticks_per_usec(adap);
2929 ipg = (ipg + 5000) / 10000;
/* Read-modify-write: two schedulers share each separator register. */
2933 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2934 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2936 v = (v & 0xffff) | (ipg << 16);
2938 v = (v & 0xffff0000) | ipg;
2939 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
2940 t3_read_reg(adap, A_TP_TM_PIO_DATA);
2945 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
2946 * @adap: the adapter
2947 * @sched: the scheduler index
2948 * @kbps: the byte rate in Kbps
2949 * @ipg: the interpacket delay in tenths of nanoseconds
2951 * Return the current configuration of a HW Tx scheduler.
2953 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
2956 unsigned int v, addr, bpt, cpt;
2959 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
2960 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2961 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2964 bpt = (v >> 8) & 0xff;
/* Rate = (core clock / clocks-per-tick) * bytes-per-tick, in Kbps. */
2967 *kbps = 0; /* scheduler disabled */
2969 v = (adap->params.vpd.cclk * 1000) / cpt;
2970 *kbps = (v * bpt) / 125;
2974 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
2975 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2976 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* Convert core clocks back to tenths of nanoseconds. */
2980 *ipg = (10000 * v) / core_ticks_per_usec(adap);
/*
 * Final TP bring-up: VLAN acceleration, then (offload parts only) timer
 * programming and free-list initialization, polling until the FLSTINIT
 * bit clears.  Returns non-zero (busy) on timeout.
 */
2984 static int tp_init(adapter_t *adap, const struct tp_params *p)
2989 t3_set_vlan_accel(adap, 3, 0);
2991 if (is_offload(adap)) {
2992 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2993 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2994 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2997 CH_ERR(adap, "TP initialization timed out\n");
3001 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/*
 * Enable exactly the ports named in @port_mask; rejects bits beyond the
 * adapter's port count.
 */
3005 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3007 if (port_mask & ~((1 << adap->params.nports) - 1))
3009 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3010 port_mask << S_PORT0ACTIVE);
3015 * Perform the bits of HW initialization that are dependent on the Tx
3016 * channels being used.
3018 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
/* chan_map is a bitmask of active Tx channels; 3 means both. */
3022 if (chan_map != 3) { /* one channel */
/* Single channel: no round-robin arbitration needed. */
3023 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3024 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3025 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3026 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3027 F_TPTXPORT1EN | F_PORT1ACTIVE));
/* Give the whole PM1 Tx buffer to whichever channel is active. */
3028 t3_write_reg(adap, A_PM1_TX_CFG,
3029 chan_map == 1 ? 0xffffffff : 0);
3030 } else { /* two channels */
3031 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3032 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3033 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3034 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3035 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3036 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
/* Split the PM1 Tx buffer evenly between the two channels. */
3038 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3039 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3040 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3041 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* Interleave the 16 mod-queue table entries across both channels. */
3042 for (i = 0; i < 16; i++)
3043 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3044 (i << 16) | 0x1010);
/*
 * Calibrate the XGMAC I/O impedance.  XAUI parts run the hardware
 * calibration engine (retrying up to 5 times); RGMII parts use fixed
 * pull-up/pull-down settings.  Returns non-zero on calibration failure.
 */
3048 static int calibrate_xgm(adapter_t *adapter)
3050 if (uses_xaui(adapter)) {
3053 for (i = 0; i < 5; ++i) {
/* Kick off a calibration cycle; the read flushes the posted write. */
3054 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3055 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3057 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3058 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
/* Latch the measured impedance back into the control field. */
3059 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3060 V_XAUIIMP(G_CALIMP(v) >> 2));
3064 CH_ERR(adapter, "MAC calibration failed\n");
3067 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3068 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3069 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3070 F_XGM_IMPSETUPDATE);
/*
 * T3B variant of the RGMII impedance calibration: pulse CALRESET, then
 * IMPSETUPDATE, then CALUPDATE to load the fixed settings.  XAUI parts
 * need no action here.
 */
3075 static void calibrate_xgm_t3b(adapter_t *adapter)
3077 if (!uses_xaui(adapter)) {
3078 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3079 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3080 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3081 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3082 F_XGM_IMPSETUPDATE);
3083 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3085 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3086 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DRAM timing parameters for one MC7 speed/type grade (cycle counts). */
3090 struct mc7_timing_params {
3091 unsigned char ActToPreDly;
3092 unsigned char ActToRdWrDly;
3093 unsigned char PreCyc;
/* Refresh cycles indexed by memory density (see mc7_timings tables). */
3094 unsigned char RefCyc[5];
3095 unsigned char BkCyc;
3096 unsigned char WrToRdDly;
3097 unsigned char RdToWrDly;
3101 * Write a value to a register and check that the write completed. These
3102 * writes normally complete in a cycle or two, so one read should suffice.
3103 * The very first read exists to flush the posted write to the device.
3105 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3107 t3_write_reg(adapter, addr, val);
3108 (void) t3_read_reg(adapter, addr); /* flush */
3109 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3111 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Initialize one MC7 memory controller: read back geometry, calibrate,
 * program DRAM timing, run the JEDEC-style mode-register init sequence,
 * enable refresh and ECC, and BIST-clear the whole array before opening
 * the controller for normal accesses.  @mem_type indexes the timing and
 * mode tables below; @mc7_clock is the memory clock in KHz.
 */
3115 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* Mode-register values per memory grade (parallel to mc7_timings). */
3117 static const unsigned int mc7_mode[] = {
3118 0x632, 0x642, 0x652, 0x432, 0x442
3120 static const struct mc7_timing_params mc7_timings[] = {
3121 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3122 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3123 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3124 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3125 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3129 unsigned int width, density, slow, attempts;
3130 adapter_t *adapter = mc7->adapter;
3131 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* The CFG register describes the populated memory: width, density, grade. */
3136 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3137 slow = val & F_SLOW;
3138 width = G_WIDTH(val);
3139 density = G_DEN(val);
3141 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3142 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot calibration; fail if still busy or faulted afterwards. */
3146 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3147 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3149 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3150 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3151 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3157 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3158 V_ACTTOPREDLY(p->ActToPreDly) |
3159 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3160 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3161 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3163 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3164 val | F_CLKEN | F_TERM150);
3165 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3168 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DRAM init sequence: precharge, extended mode registers, mode register. */
3173 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3174 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3175 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3176 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3180 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3181 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
/* Precharge + two refreshes + mode register, per the JEDEC sequence. */
3186 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3187 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3188 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3189 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3190 mc7_mode[mem_type]) ||
3191 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3192 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3195 /* clock value is in KHz */
3196 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3197 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
/* Enable periodic refresh at the derived interval. */
3199 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3200 F_PERREFEN | V_PREREFDIV(mc7_clock));
3201 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3203 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3204 F_ECCGENEN | F_ECCCHKEN);
/* BIST write of zeros over the full array to initialize ECC state. */
3205 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3206 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3207 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3208 (mc7->size << width) - 1);
3209 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3210 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3215 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3216 } while ((val & F_BUSY) && --attempts);
3218 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3222 /* Enable normal memory accesses. */
3223 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ACK latency and replay timer limits based on the negotiated
 * link width, the max payload size from PCIe Device Control, and the
 * fast-training sequence counts.  Tables are indexed [log2 link width]
 * [payload size code].
 */
3230 static void config_pcie(adapter_t *adap)
3232 static const u16 ack_lat[4][6] = {
3233 { 237, 416, 559, 1071, 2095, 4143 },
3234 { 128, 217, 289, 545, 1057, 2081 },
3235 { 73, 118, 154, 282, 538, 1050 },
3236 { 67, 107, 86, 150, 278, 534 }
3238 static const u16 rpl_tmr[4][6] = {
3239 { 711, 1248, 1677, 3213, 6285, 12429 },
3240 { 384, 651, 867, 1635, 3171, 6243 },
3241 { 219, 354, 462, 846, 1614, 3150 },
3242 { 201, 321, 258, 450, 834, 1602 }
3246 unsigned int log2_width, pldsize;
3247 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Max payload size: bits [7:5] of the PCIe Device Control register. */
3249 t3_os_pci_read_config_2(adap,
3250 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3252 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3254 t3_os_pci_read_config_2(adap,
3255 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* Rev 0 silicon has no separate Rx FTS count; reuse the Tx value. */
3258 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3259 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3260 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3261 log2_width = fls(adap->params.pci.width) - 1;
3262 acklat = ack_lat[log2_width][pldsize];
3263 if (val & 1) /* check LOsEnable */
3264 acklat += fst_trn_tx * 4;
3265 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* T3A (rev 0) uses a differently-placed ACKLAT field. */
3267 if (adap->params.rev == 0)
3268 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3269 V_T3A_ACKLAT(M_T3A_ACKLAT),
3270 V_T3A_ACKLAT(acklat));
3272 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3275 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3276 V_REPLAYLMT(rpllmt));
/* Clear any stale PCIe errors, then enable CPL ID decoding. */
3278 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3279 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3283 * Initialize and configure T3 HW modules. This performs the
3284 * initialization steps that need to be done once after a card is reset.
3285 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3287 * fw_params are passed to FW and their value is platform dependent. Only the
3288 * top 8 bits are available for use, the rest must be 0.
3290 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3292 int err = -EIO, attempts = 100;
3293 const struct vpd_params *vpd = &adapter->params.vpd;
/* MAC impedance calibration differs by silicon rev. */
3295 if (adapter->params.rev > 0)
3296 calibrate_xgm_t3b(adapter);
3297 else if (calibrate_xgm(adapter))
/* 4-port adapters route through an external VSC7323 switch chip. */
3300 if (adapter->params.nports > 2) {
3301 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3302 if ((err = t3_vsc7323_init(adapter, adapter->params.nports)))
3307 partition_mem(adapter, &adapter->params.tp);
/* Bring up all three MC7 controllers and the MC5 TCAM. */
3309 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3310 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3311 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3312 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3313 adapter->params.mc5.nfilters,
3314 adapter->params.mc5.nroutes))
3318 if (tp_init(adapter, &adapter->params.tp))
3321 #ifdef CONFIG_CHELSIO_T3_CORE
3322 t3_tp_set_coalescing_size(adapter,
3323 min(adapter->params.sge.max_pkt_size,
3324 MAX_RX_COALESCING_LEN), 1);
3325 t3_tp_set_max_rxsize(adapter,
3326 min(adapter->params.sge.max_pkt_size, 16384U));
3327 ulp_config(adapter, &adapter->params.tp);
/* Bus-specific tuning: PCIe link parameters or PCI-X CPL decoding. */
3329 if (is_pcie(adapter))
3330 config_pcie(adapter);
3332 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3334 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3335 chan_init_hw(adapter, adapter->params.chan_map);
3336 t3_sge_init(adapter, &adapter->params.sge);
/* Hand the uP its parameters and boot address, then poll for it to
 * clear ACC_DATA as its "initialized" handshake. */
3338 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3339 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3340 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3341 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3343 do { /* wait for uP to initialize */
3345 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3347 CH_ERR(adapter, "uP initialization timed out\n");
3357 * get_pci_mode - determine a card's PCI mode
3358 * @adapter: the adapter
3359 * @p: where to store the PCI settings
3361 * Determines a card's PCI mode and associated parameters, such as speed
3364 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3366 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3367 u32 pci_mode, pcie_cap;
3369 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3373 p->variant = PCI_VARIANT_PCIE;
3374 p->pcie_cap_addr = pcie_cap;
3375 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3377 p->width = (val >> 4) & 0x3f;
3381 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3382 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3383 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3384 pci_mode = G_PCIXINITPAT(pci_mode);
3386 p->variant = PCI_VARIANT_PCI;
3387 else if (pci_mode < 4)
3388 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3389 else if (pci_mode < 8)
3390 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3392 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3396 * init_link_config - initialize a link's SW state
3397 * @lc: structure holding the link state
3398 * @ai: information about the current card
3400 * Initializes the SW state maintained for each link, including the link's
3401 * capabilities and default speed/duplex/flow-control/autonegotiation
3404 static void __devinit init_link_config(struct link_config *lc,
3407 lc->supported = caps;
3408 lc->requested_speed = lc->speed = SPEED_INVALID;
3409 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3410 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3411 if (lc->supported & SUPPORTED_Autoneg) {
3412 lc->advertising = lc->supported;
3413 lc->autoneg = AUTONEG_ENABLE;
3414 lc->requested_fc |= PAUSE_AUTONEG;
3416 lc->advertising = 0;
3417 lc->autoneg = AUTONEG_DISABLE;
3422 * mc7_calc_size - calculate MC7 memory size
3423 * @cfg: the MC7 configuration
3425 * Calculates the size of an MC7 memory in bytes from the value of its
3426 * configuration register.
3428 static unsigned int __devinit mc7_calc_size(u32 cfg)
3430 unsigned int width = G_WIDTH(cfg);
3431 unsigned int banks = !!(cfg & F_BKS) + 1;
3432 unsigned int org = !!(cfg & F_ORG) + 1;
3433 unsigned int density = G_DEN(cfg);
3434 unsigned int MBs = ((256 << density) * banks) / (org << width);
3439 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3440 unsigned int base_addr, const char *name)
3444 mc7->adapter = adapter;
3446 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3447 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3448 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3449 mc7->width = G_WIDTH(cfg);
3452 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3454 mac->adapter = adapter;
3455 mac->multiport = adapter->params.nports > 2;
3457 if (mac->multiport) {
3458 mac->ext_port = (unsigned char)index;
3464 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3466 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3467 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3468 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3469 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3474 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3476 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3479 mi1_init(adapter, ai);
3480 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3481 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3482 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3483 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3484 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3486 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3489 /* Enable MAC clocks so we can access the registers */
3490 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3491 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3493 val |= F_CLKDIVRESET_;
3494 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3495 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3496 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3497 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3501 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3504 static int t3_reset_adapter(adapter_t *adapter)
3506 int i, save_and_restore_pcie =
3507 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3510 if (save_and_restore_pcie)
3511 t3_os_pci_save_state(adapter);
3512 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3515 * Delay. Give Some time to device to reset fully.
3516 * XXX The delay time should be modified.
3518 for (i = 0; i < 10; i++) {
3520 t3_os_pci_read_config_2(adapter, 0x00, &devid);
3521 if (devid == 0x1425)
3525 if (devid != 0x1425)
3528 if (save_and_restore_pcie)
3529 t3_os_pci_restore_state(adapter);
3534 * Initialize adapter SW state for the various HW modules, set initial values
3535 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3538 int __devinit t3_prep_adapter(adapter_t *adapter,
3539 const struct adapter_info *ai, int reset)
3542 unsigned int i, j = 0;
3544 get_pci_mode(adapter, &adapter->params.pci);
3546 adapter->params.info = ai;
3547 adapter->params.nports = ai->nports0 + ai->nports1;
3548 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3549 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3550 adapter->params.linkpoll_period = 0;
3551 adapter->params.stats_update_period = is_10G(adapter) ?
3552 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3553 adapter->params.pci.vpd_cap_addr =
3554 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3556 ret = get_vpd_params(adapter, &adapter->params.vpd);
3558 printf("failed to get VPD params\n");
3561 if (reset && t3_reset_adapter(adapter))
3564 t3_sge_prep(adapter, &adapter->params.sge);
3566 if (adapter->params.vpd.mclk) {
3567 struct tp_params *p = &adapter->params.tp;
3569 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3570 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3571 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3573 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3574 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3575 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3576 p->cm_size = t3_mc7_size(&adapter->cm);
3577 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3578 p->chan_tx_size = p->pmtx_size / p->nchan;
3579 p->rx_pg_size = 64 * 1024;
3580 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3581 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3582 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3583 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3584 adapter->params.rev > 0 ? 12 : 6;
3585 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3587 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3590 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3591 t3_mc7_size(&adapter->pmtx) &&
3592 t3_mc7_size(&adapter->cm);
3594 if (is_offload(adapter)) {
3595 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3596 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3597 DEFAULT_NFILTERS : 0;
3598 adapter->params.mc5.nroutes = 0;
3599 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3601 #ifdef CONFIG_CHELSIO_T3_CORE
3602 init_mtus(adapter->params.mtus);
3603 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3607 early_hw_init(adapter, ai);
3609 for_each_port(adapter, i) {
3611 struct port_info *p = adap2pinfo(adapter, i);
3613 while (adapter->params.vpd.port_type[j] == 0) {
3616 if (adapter->params.vpd.port_type[j] > sizeof(port_types)/sizeof(port_types[0])) {
3617 printf("bad port type idx=%d\n", adapter->params.vpd.port_type[j]);
3618 printf("port types: ");
3619 for (i = 0; i < j; i++)
3620 printf("port[%d]=%d ", i, adapter->params.vpd.port_type[i]);
3626 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3627 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3629 mac_prep(&p->mac, adapter, j);
3633 * The VPD EEPROM stores the base Ethernet address for the
3634 * card. A port's address is derived from the base by adding
3635 * the port's index to the base's low octet.
3637 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3638 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3640 t3_os_set_hw_addr(adapter, i, hw_addr);
3641 init_link_config(&p->link_config, p->port_type->caps);
3642 p->phy.ops->power_down(&p->phy, 1);
3643 if (!(p->port_type->caps & SUPPORTED_IRQ))
3644 adapter->params.linkpoll_period = 10;
3650 void t3_led_ready(adapter_t *adapter)
3652 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3656 void t3_port_failover(adapter_t *adapter, int port)
3660 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
3661 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3665 void t3_failover_done(adapter_t *adapter, int port)
3667 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3668 F_PORT0ACTIVE | F_PORT1ACTIVE);
3671 void t3_failover_clear(adapter_t *adapter)
3673 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3674 F_PORT0ACTIVE | F_PORT1ACTIVE);