1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Chelsio Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <dev/cxgb/common/cxgb_common.h>
38 #include <dev/cxgb/common/cxgb_regs.h>
39 #include <dev/cxgb/common/cxgb_sge_defs.h>
40 #include <dev/cxgb/common/cxgb_firmware_exports.h>
43 * t3_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
58 int attempts, int delay, u32 *valp)
61 u32 val = t3_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
76 * t3_write_regs - write a bunch of registers
77 * @adapter: the adapter to program
78 * @p: an array of register address/register value pairs
79 * @n: the number of address/value pairs
80 * @offset: register address offset
82 * Takes an array of register address/register value pairs and writes each
83 * value to the corresponding register. Register addresses are adjusted
84 * by the supplied offset.
86 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
90 t3_write_reg(adapter, p->reg_addr + offset, p->val);
96 * t3_set_reg_field - set a register field to a value
97 * @adapter: the adapter to program
98 * @addr: the register address
99 * @mask: specifies the portion of the register to modify
100 * @val: the new value for the register field
102 * Sets a register field specified by the supplied mask to the
105 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
107 u32 v = t3_read_reg(adapter, addr) & ~mask;
109 t3_write_reg(adapter, addr, v | val);
110 (void) t3_read_reg(adapter, addr); /* flush */
114 * t3_read_indirect - read indirectly addressed registers
116 * @addr_reg: register holding the indirect address
117 * @data_reg: register holding the value of the indirect register
118 * @vals: where the read register values are stored
119 * @start_idx: index of first indirect register to read
120 * @nregs: how many indirect registers to read
122 * Reads registers that are accessed indirectly through an address/data
125 void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
126 unsigned int data_reg, u32 *vals, unsigned int nregs,
127 unsigned int start_idx)
130 t3_write_reg(adap, addr_reg, start_idx);
131 *vals++ = t3_read_reg(adap, data_reg);
137 * t3_mc7_bd_read - read from MC7 through backdoor accesses
138 * @mc7: identifies MC7 to read from
139 * @start: index of first 64-bit word to read
140 * @n: number of 64-bit words to read
141 * @buf: where to store the read result
143 * Read n 64-bit words from MC7 starting at word start, using backdoor
146 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
149 static int shift[] = { 0, 0, 16, 24 };
150 static int step[] = { 0, 32, 16, 8 };
152 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
153 adapter_t *adap = mc7->adapter;
155 if (start >= size64 || start + n > size64)
158 start *= (8 << mc7->width);
163 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
167 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
169 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
170 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
171 while ((val & F_BUSY) && attempts--)
172 val = t3_read_reg(adap,
173 mc7->offset + A_MC7_BD_OP);
177 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
178 if (mc7->width == 0) {
179 val64 = t3_read_reg(adap,
180 mc7->offset + A_MC7_BD_DATA0);
181 val64 |= (u64)val << 32;
184 val >>= shift[mc7->width];
185 val64 |= (u64)val << (step[mc7->width] * i);
197 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
199 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
200 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
203 if (!(ai->caps & SUPPORTED_10000baseT_Full))
205 t3_write_reg(adap, A_MI1_CFG, val);
208 #define MDIO_ATTEMPTS 10
211 * MI1 read/write operations for direct-addressed PHYs.
213 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
214 int reg_addr, unsigned int *valp)
217 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
223 t3_write_reg(adapter, A_MI1_ADDR, addr);
224 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
225 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
227 *valp = t3_read_reg(adapter, A_MI1_DATA);
228 MDIO_UNLOCK(adapter);
232 static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
233 int reg_addr, unsigned int val)
236 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
242 t3_write_reg(adapter, A_MI1_ADDR, addr);
243 t3_write_reg(adapter, A_MI1_DATA, val);
244 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
245 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
246 MDIO_UNLOCK(adapter);
250 static struct mdio_ops mi1_mdio_ops = {
256 * MI1 read/write operations for indirect-addressed PHYs.
258 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
259 int reg_addr, unsigned int *valp)
262 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
265 t3_write_reg(adapter, A_MI1_ADDR, addr);
266 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
270 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
271 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
274 *valp = t3_read_reg(adapter, A_MI1_DATA);
276 MDIO_UNLOCK(adapter);
280 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
281 int reg_addr, unsigned int val)
284 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
287 t3_write_reg(adapter, A_MI1_ADDR, addr);
288 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
289 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
290 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
292 t3_write_reg(adapter, A_MI1_DATA, val);
293 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
294 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
297 MDIO_UNLOCK(adapter);
301 static struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
332 * t3_phy_reset - reset a PHY block
333 * @phy: the PHY to operate on
334 * @mmd: the device address of the PHY block to reset
335 * @wait: how long to wait for the reset to complete in 1ms increments
337 * Resets a PHY block and optionally waits for the reset to complete.
338 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
341 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
346 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
351 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 } while (ctl && --wait);
363 * t3_phy_advertise - set the PHY advertisement registers for autoneg
364 * @phy: the PHY to operate on
365 * @advert: bitmap of capabilities the PHY should advertise
367 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
368 * requested capabilities.
370 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
373 unsigned int val = 0;
375 err = mdio_read(phy, 0, MII_CTRL1000, &val);
379 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
380 if (advert & ADVERTISED_1000baseT_Half)
381 val |= ADVERTISE_1000HALF;
382 if (advert & ADVERTISED_1000baseT_Full)
383 val |= ADVERTISE_1000FULL;
385 err = mdio_write(phy, 0, MII_CTRL1000, val);
390 if (advert & ADVERTISED_10baseT_Half)
391 val |= ADVERTISE_10HALF;
392 if (advert & ADVERTISED_10baseT_Full)
393 val |= ADVERTISE_10FULL;
394 if (advert & ADVERTISED_100baseT_Half)
395 val |= ADVERTISE_100HALF;
396 if (advert & ADVERTISED_100baseT_Full)
397 val |= ADVERTISE_100FULL;
398 if (advert & ADVERTISED_Pause)
399 val |= ADVERTISE_PAUSE_CAP;
400 if (advert & ADVERTISED_Asym_Pause)
401 val |= ADVERTISE_PAUSE_ASYM;
402 return mdio_write(phy, 0, MII_ADVERTISE, val);
406 * t3_set_phy_speed_duplex - force PHY speed and duplex
407 * @phy: the PHY to operate on
408 * @speed: requested PHY speed
409 * @duplex: requested PHY duplex
411 * Force a 10/100/1000 PHY's speed and duplex. This also disables
412 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
414 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
419 err = mdio_read(phy, 0, MII_BMCR, &ctl);
424 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
425 if (speed == SPEED_100)
426 ctl |= BMCR_SPEED100;
427 else if (speed == SPEED_1000)
428 ctl |= BMCR_SPEED1000;
431 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
432 if (duplex == DUPLEX_FULL)
433 ctl |= BMCR_FULLDPLX;
435 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
436 ctl |= BMCR_ANENABLE;
437 return mdio_write(phy, 0, MII_BMCR, ctl);
/*
 * Static adapter "personality" table, one entry per supported board type.
 * NOTE(review): this listing is elided -- each entry's leading initializer
 * fields (original lines between the GPIO masks shown) are missing here;
 * do not treat the entries below as complete.  Entry order matters: it is
 * indexed by t3_get_adapter_info().
 */
440 static struct adapter_info t3_adap_info[] = {
/* Chelsio PE9000: 1G board, direct (clause 22) MDIO ops */
442 F_GPIO2_OEN | F_GPIO4_OEN |
443 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
445 &mi1_mdio_ops, "Chelsio PE9000" },
/* Chelsio T302: 1G board, direct (clause 22) MDIO ops */
447 F_GPIO2_OEN | F_GPIO4_OEN |
448 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
450 &mi1_mdio_ops, "Chelsio T302" },
/* Chelsio T310: 10G board, indirect (clause 45) MDIO ops */
452 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
453 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
454 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
455 &mi1_mdio_ext_ops, "Chelsio T310" },
/* Chelsio T320: dual-10G board, indirect (clause 45) MDIO ops */
457 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
458 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
459 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
460 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
461 &mi1_mdio_ext_ops, "Chelsio T320" },
465 * Return the adapter_info structure with a given index. Out-of-range indices
468 const struct adapter_info *t3_get_adapter_info(unsigned int id)
470 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
473 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
474 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
475 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
/*
 * PHY preparation handlers and link capabilities, indexed by the port type
 * codes read from the VPD EEPROM (see get_vpd_params()).
 * NOTE(review): this listing is elided -- at least the entry for port type 0
 * and some description strings are missing here.
 */
477 static struct port_type_info port_types[] = {
479 { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
481 { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
482 "10/100/1000BASE-T" },
483 { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
484 "10/100/1000BASE-T" },
485 { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
486 { NULL, CAPS_10G, "10GBASE-KX4" },
487 { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
488 { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
490 { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
/*
 * Expands to the three fields of one VPD keyword entry as laid out in the
 * EEPROM: 2-byte keyword, 1-byte length, then @len bytes of data.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
500 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
/*
 * NOTE(review): this listing is elided -- the end of the comment above, the
 * "struct t3_vpd {" opening, the leading ID-tag fields (id_tag etc. read by
 * get_vpd_params()), and the closing "};" are missing here.  Each VPD_ENTRY
 * expands to kword[2]/len/data[] fields matching the EEPROM layout.
 */
509 VPD_ENTRY(pn, 16); /* part number */
510 VPD_ENTRY(ec, 16); /* EC level */
511 VPD_ENTRY(sn, 16); /* serial number */
512 VPD_ENTRY(na, 12); /* MAC address base */
513 VPD_ENTRY(cclk, 6); /* core clock */
514 VPD_ENTRY(mclk, 6); /* mem clock */
515 VPD_ENTRY(uclk, 6); /* uP clk */
516 VPD_ENTRY(mdc, 6); /* MDIO clk */
517 VPD_ENTRY(mt, 2); /* mem timing */
518 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
519 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
520 VPD_ENTRY(port0, 2); /* PHY0 complex */
521 VPD_ENTRY(port1, 2); /* PHY1 complex */
522 VPD_ENTRY(port2, 2); /* PHY2 complex */
523 VPD_ENTRY(port3, 2); /* PHY3 complex */
524 VPD_ENTRY(rv, 1); /* csum */
525 u32 pad; /* for multiple-of-4 sizing and alignment */
528 #define EEPROM_MAX_POLL 4
529 #define EEPROM_STAT_ADDR 0x4000
530 #define VPD_BASE 0xc00
533 * t3_seeprom_read - read a VPD EEPROM location
534 * @adapter: adapter to read
535 * @addr: EEPROM address
536 * @data: where to store the read data
538 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
539 * VPD ROM capability. A zero is written to the flag bit when the
540 * addres is written to the control register. The hardware device will
541 * set the flag to 1 when 4 bytes have been read into the data register.
543 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
546 int attempts = EEPROM_MAX_POLL;
547 unsigned int base = adapter->params.pci.vpd_cap_addr;
549 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
552 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
555 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
556 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
558 if (!(val & PCI_VPD_ADDR_F)) {
559 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
562 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
563 *data = le32_to_cpu(*data);
568 * t3_seeprom_write - write a VPD EEPROM location
569 * @adapter: adapter to write
570 * @addr: EEPROM address
571 * @data: value to write
573 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
574 * VPD ROM capability.
576 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
579 int attempts = EEPROM_MAX_POLL;
580 unsigned int base = adapter->params.pci.vpd_cap_addr;
582 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
585 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
587 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
588 (u16)addr | PCI_VPD_ADDR_F);
591 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
592 } while ((val & PCI_VPD_ADDR_F) && --attempts);
594 if (val & PCI_VPD_ADDR_F) {
595 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
602 * t3_seeprom_wp - enable/disable EEPROM write protection
603 * @adapter: the adapter
604 * @enable: 1 to enable write protection, 0 to disable it
606 * Enables or disables write protection on the serial EEPROM.
608 int t3_seeprom_wp(adapter_t *adapter, int enable)
610 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The caller must
 * ensure @c is a valid hex digit; no validation is performed here.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
622 * get_vpd_params - read VPD parameters from VPD EEPROM
623 * @adapter: adapter to read
624 * @p: where to store the parameters
626 * Reads card parameters stored in VPD EEPROM.
628 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
634 * Card information is normally at VPD_BASE but some early cards had
637 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
640 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
642 for (i = 0; i < sizeof(vpd); i += 4) {
643 ret = t3_seeprom_read(adapter, addr + i,
644 (u32 *)((u8 *)&vpd + i));
649 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
650 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
651 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
652 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
653 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
655 /* Old eeproms didn't have port information */
656 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
657 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
658 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
660 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
661 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
662 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
663 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
666 for (i = 0; i < 6; i++)
667 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
668 hex2int(vpd.na_data[2 * i + 1]);
672 /* serial flash and firmware constants */
/*
 * NOTE(review): the enclosing "enum {" and "};" lines of this anonymous
 * enum are elided from this listing.  The SF_* opcodes below are standard
 * SPI NOR flash commands issued through sf1_read()/sf1_write().
 */
674 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
675 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
676 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
678 /* flash command opcodes */
679 SF_PROG_PAGE = 2, /* program page */
680 SF_WR_DISABLE = 4, /* disable writes */
681 SF_RD_STATUS = 5, /* read status register */
682 SF_WR_ENABLE = 6, /* enable writes */
683 SF_RD_DATA_FAST = 0xb, /* read flash */
684 SF_ERASE_SECTOR = 0xd8, /* erase sector */
686 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
687 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
691 * sf1_read - read data from the serial flash
692 * @adapter: the adapter
693 * @byte_cnt: number of bytes to read
694 * @cont: whether another operation will be chained
695 * @valp: where to store the read data
697 * Reads up to 4 bytes of data from the serial flash. The location of
698 * the read needs to be specified prior to calling this by issuing the
699 * appropriate commands to the serial flash.
701 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
706 if (!byte_cnt || byte_cnt > 4)
708 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
710 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
711 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
713 *valp = t3_read_reg(adapter, A_SF_DATA);
718 * sf1_write - write data to the serial flash
719 * @adapter: the adapter
720 * @byte_cnt: number of bytes to write
721 * @cont: whether another operation will be chained
722 * @val: value to write
724 * Writes up to 4 bytes of data to the serial flash. The location of
725 * the write needs to be specified prior to calling this by issuing the
726 * appropriate commands to the serial flash.
728 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
731 if (!byte_cnt || byte_cnt > 4)
733 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
735 t3_write_reg(adapter, A_SF_DATA, val);
736 t3_write_reg(adapter, A_SF_OP,
737 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
738 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
742 * flash_wait_op - wait for a flash operation to complete
743 * @adapter: the adapter
744 * @attempts: max number of polls of the status register
745 * @delay: delay between polls in ms
747 * Wait for a flash operation to complete by polling the status register.
749 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
755 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
756 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
768 * t3_read_flash - read words from serial flash
769 * @adapter: the adapter
770 * @addr: the start address for the read
771 * @nwords: how many 32-bit words to read
772 * @data: where to store the read data
773 * @byte_oriented: whether to store data as bytes or as words
775 * Read the specified number of 32-bit words from the serial flash.
776 * If @byte_oriented is set the read data is stored as a byte array
777 * (i.e., big-endian), otherwise as 32-bit words in the platform's
780 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
781 u32 *data, int byte_oriented)
785 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
788 addr = swab32(addr) | SF_RD_DATA_FAST;
790 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
791 (ret = sf1_read(adapter, 1, 1, data)) != 0)
794 for ( ; nwords; nwords--, data++) {
795 ret = sf1_read(adapter, 4, nwords > 1, data);
799 *data = htonl(*data);
805 * t3_write_flash - write up to a page of data to the serial flash
806 * @adapter: the adapter
807 * @addr: the start address to write
808 * @n: length of data to write
809 * @data: the data to write
811 * Writes up to a page of data (256 bytes) to the serial flash starting
812 * at the given address.
814 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
815 unsigned int n, const u8 *data)
819 unsigned int i, c, left, val, offset = addr & 0xff;
821 if (addr + n > SF_SIZE || offset + n > 256)
824 val = swab32(addr) | SF_PROG_PAGE;
826 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
827 (ret = sf1_write(adapter, 4, 1, val)) != 0)
830 for (left = n; left; left -= c) {
832 for (val = 0, i = 0; i < c; ++i)
833 val = (val << 8) + *data++;
835 ret = sf1_write(adapter, c, c != left, val);
839 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
842 /* Read the page to verify the write succeeded */
843 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
847 if (memcmp(data - n, (u8 *)buf + offset, n))
852 enum fw_version_type {
858 * t3_get_fw_version - read the firmware version
859 * @adapter: the adapter
860 * @vers: where to place the version
862 * Reads the FW version from flash.
864 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
866 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
870 * t3_check_fw_version - check if the FW is compatible with this driver
871 * @adapter: the adapter
873 * Checks if an adapter's FW is compatible with the driver. Returns 0
874 * if the versions are compatible, a negative error otherwise.
876 int t3_check_fw_version(adapter_t *adapter)
880 unsigned int type, major, minor;
882 ret = t3_get_fw_version(adapter, &vers);
886 type = G_FW_VERSION_TYPE(vers);
887 major = G_FW_VERSION_MAJOR(vers);
888 minor = G_FW_VERSION_MINOR(vers);
890 if (type == FW_VERSION_T3 && major == CHELSIO_FW_MAJOR && minor == CHELSIO_FW_MINOR)
893 CH_ERR(adapter, "found wrong FW version(%u.%u), "
894 "driver needs version %d.%d\n", major, minor,
895 CHELSIO_FW_MAJOR, CHELSIO_FW_MINOR);
900 * t3_flash_erase_sectors - erase a range of flash sectors
901 * @adapter: the adapter
902 * @start: the first sector to erase
903 * @end: the last sector to erase
905 * Erases the sectors in the given range.
907 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
909 while (start <= end) {
912 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
913 (ret = sf1_write(adapter, 4, 0,
914 SF_ERASE_SECTOR | (start << 8))) != 0 ||
915 (ret = flash_wait_op(adapter, 5, 500)) != 0)
923 * t3_load_fw - download firmware
924 * @adapter: the adapter
925 * @fw_data: the firrware image to write
928 * Write the supplied firmware image to the card's serial flash.
929 * The FW image has the following sections: @size - 8 bytes of code and
930 * data, followed by 4 bytes of FW version, followed by the 32-bit
931 * 1's complement checksum of the whole image.
933 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
937 const u32 *p = (const u32 *)fw_data;
938 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
942 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
945 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
947 if (csum != 0xffffffff) {
948 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
953 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
957 size -= 8; /* trim off version and checksum */
958 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
959 unsigned int chunk_size = min(size, 256U);
961 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
966 fw_data += chunk_size;
970 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
973 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
977 #define CIM_CTL_BASE 0x2000
980 * t3_cim_ctl_blk_read - read a block from CIM control region
983 * @addr: the start address within the CIM control region
984 * @n: number of words to read
985 * @valp: where to store the result
987 * Reads a block of 4-byte words from the CIM control region.
989 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
994 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
997 for ( ; !ret && n--; addr += 4) {
998 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
999 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1002 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1008 * t3_link_changed - handle interface link changes
1009 * @adapter: the adapter
1010 * @port_id: the port index that changed link state
1012 * Called when a port's link settings change to propagate the new values
1013 * to the associated PHY and MAC. After performing the common tasks it
1014 * invokes an OS-specific handler.
1016 void t3_link_changed(adapter_t *adapter, int port_id)
1018 int link_ok, speed, duplex, fc;
1019 struct cphy *phy = &adapter->port[port_id].phy;
1020 struct cmac *mac = &adapter->port[port_id].mac;
1021 struct link_config *lc = &adapter->port[port_id].link_config;
1023 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1025 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1026 uses_xaui(adapter)) {
1029 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1030 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1032 lc->link_ok = (unsigned char)link_ok;
1033 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1034 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1035 if (lc->requested_fc & PAUSE_AUTONEG)
1036 fc &= lc->requested_fc;
1038 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1040 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1041 /* Set MAC speed, duplex, and flow control to match PHY. */
1042 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1043 lc->fc = (unsigned char)fc;
1046 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1050 * t3_link_start - apply link configuration to MAC/PHY
1051 * @phy: the PHY to setup
1052 * @mac: the MAC to setup
1053 * @lc: the requested link configuration
1055 * Set up a port's MAC and PHY according to a desired link configuration.
1056 * - If the PHY can auto-negotiate first decide what to advertise, then
1057 * enable/disable auto-negotiation as desired, and reset.
1058 * - If the PHY does not auto-negotiate just reset it.
1059 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1060 * otherwise do it later based on the outcome of auto-negotiation.
1062 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1064 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1067 if (lc->supported & SUPPORTED_Autoneg) {
1068 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1070 lc->advertising |= ADVERTISED_Asym_Pause;
1072 lc->advertising |= ADVERTISED_Pause;
1074 phy->ops->advertise(phy, lc->advertising);
1076 if (lc->autoneg == AUTONEG_DISABLE) {
1077 lc->speed = lc->requested_speed;
1078 lc->duplex = lc->requested_duplex;
1079 lc->fc = (unsigned char)fc;
1080 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1082 /* Also disables autoneg */
1083 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1084 phy->ops->reset(phy, 0);
1086 phy->ops->autoneg_enable(phy);
1088 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1089 lc->fc = (unsigned char)fc;
1090 phy->ops->reset(phy, 0);
1096 * t3_set_vlan_accel - control HW VLAN extraction
1097 * @adapter: the adapter
1098 * @ports: bitmap of adapter ports to operate on
1099 * @on: enable (1) or disable (0) HW VLAN extraction
1101 * Enables or disables HW extraction of VLAN tags for the given port.
1103 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1105 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1106 ports << S_VLANEXTRACTIONENABLE,
1107 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One entry of a table-driven interrupt description (see
 * t3_handle_intr_status()); a mask of 0 terminates a table. */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1118 * t3_handle_intr_status - table driven interrupt handler
1119 * @adapter: the adapter that generated the interrupt
1120 * @reg: the interrupt status register to process
1121 * @mask: a mask to apply to the interrupt status
1122 * @acts: table of interrupt actions
1123 * @stats: statistics counters tracking interrupt occurences
1125 * A table driven interrupt handler that applies a set of masks to an
1126 * interrupt status word and performs the corresponding actions if the
1127 * interrupts described by the mask have occured. The actions include
1128 * optionally printing a warning or alert message, and optionally
1129 * incrementing a stat counter. The table is terminated by an entry
1130 * specifying mask 0. Returns the number of fatal interrupt conditions.
1132 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1134 const struct intr_info *acts,
1135 unsigned long *stats)
1138 unsigned int status = t3_read_reg(adapter, reg) & mask;
1140 for ( ; acts->mask; ++acts) {
1141 if (!(status & acts->mask)) continue;
1144 CH_ALERT(adapter, "%s (0x%x)\n",
1145 acts->msg, status & acts->mask);
1146 } else if (acts->msg)
1147 CH_WARN(adapter, "%s (0x%x)\n",
1148 acts->msg, status & acts->mask);
1149 if (acts->stat_idx >= 0)
1150 stats[acts->stat_idx]++;
1152 if (status) /* clear processed interrupts */
1153 t3_write_reg(adapter, reg, status);
1157 #define SGE_INTR_MASK (F_RSPQDISABLED)
1158 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1159 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1161 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1162 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1163 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1164 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1165 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1166 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1167 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1168 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1169 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1170 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1171 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1172 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1173 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1174 V_BISTERR(M_BISTERR) | F_PEXERR)
1175 #define ULPRX_INTR_MASK F_PARERR
1176 #define ULPTX_INTR_MASK 0
1177 #define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1178 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1179 F_ZERO_SWITCH_ERROR)
1180 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1181 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1182 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1183 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1184 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1185 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1186 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1187 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1188 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1189 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1190 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1191 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1192 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1193 V_MCAPARERRENB(M_MCAPARERRENB))
1194 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1195 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1196 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1197 F_MPS0 | F_CPL_SWITCH)
1200 * Interrupt handler for the PCIX1 module.
1202 static void pci_intr_handler(adapter_t *adapter)
1204 static struct intr_info pcix1_intr_info[] = {
1205 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1206 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1207 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1208 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1209 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1210 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1211 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1212 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1213 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1215 { F_DETCORECCERR, "PCI correctable ECC error",
1216 STAT_PCI_CORR_ECC, 0 },
1217 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1218 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1219 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1221 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1223 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1225 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1230 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1231 pcix1_intr_info, adapter->irq_stats))
1232 t3_fatal_err(adapter);
1236 * Interrupt handler for the PCIE module.
1238 static void pcie_intr_handler(adapter_t *adapter)
1240 static struct intr_info pcie_intr_info[] = {
1241 { F_PEXERR, "PCI PEX error", -1, 1 },
1243 "PCI unexpected split completion DMA read error", -1, 1 },
1245 "PCI unexpected split completion DMA command error", -1, 1 },
1246 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1247 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1248 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1249 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1250 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1251 "PCI MSI-X table/PBA parity error", -1, 1 },
1252 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1256 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1257 pcie_intr_info, adapter->irq_stats))
1258 t3_fatal_err(adapter);
1262 * TP interrupt handler.
1264 static void tp_intr_handler(adapter_t *adapter)
1266 static struct intr_info tp_intr_info[] = {
1267 { 0xffffff, "TP parity error", -1, 1 },
1268 { 0x1000000, "TP out of Rx pages", -1, 1 },
1269 { 0x2000000, "TP out of Tx pages", -1, 1 },
1273 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1274 tp_intr_info, NULL))
1275 t3_fatal_err(adapter);
1279 * CIM interrupt handler.
1281 static void cim_intr_handler(adapter_t *adapter)
1283 static struct intr_info cim_intr_info[] = {
1284 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1285 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1286 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1287 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1288 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1289 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1290 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1291 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1292 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1293 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1294 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1295 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1299 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1300 cim_intr_info, NULL))
1301 t3_fatal_err(adapter);
1305 * ULP RX interrupt handler.
1307 static void ulprx_intr_handler(adapter_t *adapter)
1309 static struct intr_info ulprx_intr_info[] = {
1310 { F_PARERR, "ULP RX parity error", -1, 1 },
1314 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1315 ulprx_intr_info, NULL))
1316 t3_fatal_err(adapter);
1320 * ULP TX interrupt handler.
1322 static void ulptx_intr_handler(adapter_t *adapter)
1324 static struct intr_info ulptx_intr_info[] = {
1325 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1326 STAT_ULP_CH0_PBL_OOB, 0 },
1327 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1328 STAT_ULP_CH1_PBL_OOB, 0 },
1332 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1333 ulptx_intr_info, adapter->irq_stats))
1334 t3_fatal_err(adapter);
1337 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1338 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1339 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1340 F_ICSPI1_TX_FRAMING_ERROR)
1341 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1342 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1343 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1344 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1347 * PM TX interrupt handler.
1349 static void pmtx_intr_handler(adapter_t *adapter)
1351 static struct intr_info pmtx_intr_info[] = {
1352 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1353 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1354 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1355 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1356 "PMTX ispi parity error", -1, 1 },
1357 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1358 "PMTX ospi parity error", -1, 1 },
1362 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1363 pmtx_intr_info, NULL))
1364 t3_fatal_err(adapter);
1367 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1368 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1369 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1370 F_IESPI1_TX_FRAMING_ERROR)
1371 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1372 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1373 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1374 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1377 * PM RX interrupt handler.
1379 static void pmrx_intr_handler(adapter_t *adapter)
1381 static struct intr_info pmrx_intr_info[] = {
1382 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1383 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1384 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1385 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1386 "PMRX ispi parity error", -1, 1 },
1387 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1388 "PMRX ospi parity error", -1, 1 },
1392 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1393 pmrx_intr_info, NULL))
1394 t3_fatal_err(adapter);
1398 * CPL switch interrupt handler.
1400 static void cplsw_intr_handler(adapter_t *adapter)
1402 static struct intr_info cplsw_intr_info[] = {
1403 // { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1404 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1405 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1406 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1407 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1411 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1412 cplsw_intr_info, NULL))
1413 t3_fatal_err(adapter);
1417 * MPS interrupt handler.
1419 static void mps_intr_handler(adapter_t *adapter)
1421 static struct intr_info mps_intr_info[] = {
1422 { 0x1ff, "MPS parity error", -1, 1 },
1426 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1427 mps_intr_info, NULL))
1428 t3_fatal_err(adapter);
1431 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1434 * MC7 interrupt handler.
1436 static void mc7_intr_handler(struct mc7 *mc7)
1438 adapter_t *adapter = mc7->adapter;
1439 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1442 mc7->stats.corr_err++;
1443 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1444 "data 0x%x 0x%x 0x%x\n", mc7->name,
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1448 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1452 mc7->stats.uncorr_err++;
1453 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1454 "data 0x%x 0x%x 0x%x\n", mc7->name,
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1458 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1462 mc7->stats.parity_err++;
1463 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1464 mc7->name, G_PE(cause));
1470 if (adapter->params.rev > 0)
1471 addr = t3_read_reg(adapter,
1472 mc7->offset + A_MC7_ERR_ADDR);
1473 mc7->stats.addr_err++;
1474 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1478 if (cause & MC7_INTR_FATAL)
1479 t3_fatal_err(adapter);
1481 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1484 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1485 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1487 * XGMAC interrupt handler.
1489 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1491 struct cmac *mac = &adap->port[idx].mac;
1492 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1494 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1495 mac->stats.tx_fifo_parity_err++;
1496 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1498 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1499 mac->stats.rx_fifo_parity_err++;
1500 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1502 if (cause & F_TXFIFO_UNDERRUN)
1503 mac->stats.tx_fifo_urun++;
1504 if (cause & F_RXFIFO_OVERFLOW)
1505 mac->stats.rx_fifo_ovfl++;
1506 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1507 mac->stats.serdes_signal_loss++;
1508 if (cause & F_XAUIPCSCTCERR)
1509 mac->stats.xaui_pcs_ctc_err++;
1510 if (cause & F_XAUIPCSALIGNCHANGE)
1511 mac->stats.xaui_pcs_align_change++;
1513 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1514 if (cause & XGM_INTR_FATAL)
1520 * Interrupt handler for PHY events.
1522 int t3_phy_intr_handler(adapter_t *adapter)
1524 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1527 for_each_port(adapter, i) {
1528 struct port_info *p = &adapter->port[i];
1530 mask = gpi - (gpi & (gpi - 1));
1533 if (!(p->port_type->caps & SUPPORTED_IRQ))
1537 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1539 if (phy_cause & cphy_cause_link_change)
1540 t3_link_changed(adapter, i);
1541 if (phy_cause & cphy_cause_fifo_error)
1542 p->phy.fifo_errors++;
1546 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1551 * T3 slow path (non-data) interrupt handler.
1553 int t3_slow_intr_handler(adapter_t *adapter)
1555 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1557 cause &= adapter->slow_intr_mask;
1561 printf("slow intr handler\n");
1562 if (cause & F_PCIM0) {
1563 if (is_pcie(adapter))
1564 pcie_intr_handler(adapter);
1566 pci_intr_handler(adapter);
1569 t3_sge_err_intr_handler(adapter);
1570 if (cause & F_MC7_PMRX)
1571 mc7_intr_handler(&adapter->pmrx);
1572 if (cause & F_MC7_PMTX)
1573 mc7_intr_handler(&adapter->pmtx);
1574 if (cause & F_MC7_CM)
1575 mc7_intr_handler(&adapter->cm);
1577 cim_intr_handler(adapter);
1579 tp_intr_handler(adapter);
1580 if (cause & F_ULP2_RX)
1581 ulprx_intr_handler(adapter);
1582 if (cause & F_ULP2_TX)
1583 ulptx_intr_handler(adapter);
1584 if (cause & F_PM1_RX)
1585 pmrx_intr_handler(adapter);
1586 if (cause & F_PM1_TX)
1587 pmtx_intr_handler(adapter);
1588 if (cause & F_CPL_SWITCH)
1589 cplsw_intr_handler(adapter);
1591 mps_intr_handler(adapter);
1593 t3_mc5_intr_handler(&adapter->mc5);
1594 if (cause & F_XGMAC0_0)
1595 mac_intr_handler(adapter, 0);
1596 if (cause & F_XGMAC0_1)
1597 mac_intr_handler(adapter, 1);
1598 if (cause & F_T3DBG)
1599 t3_os_ext_intr_handler(adapter);
1601 /* Clear the interrupts just processed. */
1602 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1603 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1608 * t3_intr_enable - enable interrupts
1609 * @adapter: the adapter whose interrupts should be enabled
1611 * Enable interrupts by setting the interrupt enable registers of the
1612 * various HW modules and then enabling the top-level interrupt
1615 void t3_intr_enable(adapter_t *adapter)
1617 static struct addr_val_pair intr_en_avp[] = {
1618 { A_SG_INT_ENABLE, SGE_INTR_MASK },
1619 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1620 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1622 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1624 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1625 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1626 { A_TP_INT_ENABLE, 0x3bfffff },
1627 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1628 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1629 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1630 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1633 adapter->slow_intr_mask = PL_INTR_MASK;
1635 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1637 if (adapter->params.rev > 0) {
1638 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1639 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1640 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1641 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1642 F_PBL_BOUND_ERR_CH1);
1644 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1645 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1648 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1649 adapter_info(adapter)->gpio_intr);
1650 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1651 adapter_info(adapter)->gpio_intr);
1652 if (is_pcie(adapter)) {
1653 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1655 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1657 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1658 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 * t3_intr_disable - disable a card's interrupts
1663 * @adapter: the adapter whose interrupts should be disabled
1665 * Disable interrupts. We only disable the top-level interrupt
1666 * concentrator and the SGE data interrupts.
1668 void t3_intr_disable(adapter_t *adapter)
1670 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1671 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1672 adapter->slow_intr_mask = 0;
1676 * t3_intr_clear - clear all interrupts
1677 * @adapter: the adapter whose interrupts should be cleared
1679 * Clears all interrupts.
1681 void t3_intr_clear(adapter_t *adapter)
1683 static const unsigned int cause_reg_addr[] = {
1685 A_SG_RSPQ_FL_STATUS,
1688 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1689 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1690 A_CIM_HOST_INT_CAUSE,
1703 /* Clear PHY and MAC interrupts for each port. */
1704 for_each_port(adapter, i)
1705 t3_port_intr_clear(adapter, i);
1707 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1708 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1710 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1711 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1715 * t3_port_intr_enable - enable port-specific interrupts
1716 * @adapter: associated adapter
1717 * @idx: index of port whose interrupts should be enabled
1719 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1722 void t3_port_intr_enable(adapter_t *adapter, int idx)
1724 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1725 adapter->port[idx].phy.ops->intr_enable(&adapter->port[idx].phy);
1729 * t3_port_intr_disable - disable port-specific interrupts
1730 * @adapter: associated adapter
1731 * @idx: index of port whose interrupts should be disabled
1733 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1736 void t3_port_intr_disable(adapter_t *adapter, int idx)
1738 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1739 adapter->port[idx].phy.ops->intr_disable(&adapter->port[idx].phy);
1743 * t3_port_intr_clear - clear port-specific interrupts
1744 * @adapter: associated adapter
1745 * @idx: index of port whose interrupts to clear
1747 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1750 void t3_port_intr_clear(adapter_t *adapter, int idx)
1752 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1753 adapter->port[idx].phy.ops->intr_clear(&adapter->port[idx].phy);
1758 * t3_sge_write_context - write an SGE context
1759 * @adapter: the adapter
1760 * @id: the context id
1761 * @type: the context type
1763 * Program an SGE context with the values already loaded in the
1764 * CONTEXT_DATA? registers.
1766 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1772 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1773 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1774 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1775 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1780 * t3_sge_init_ecntxt - initialize an SGE egress context
1781 * @adapter: the adapter to configure
1782 * @id: the context id
1783 * @gts_enable: whether to enable GTS for the context
1784 * @type: the egress context type
1785 * @respq: associated response queue
1786 * @base_addr: base address of queue
1787 * @size: number of queue entries
1789 * @gen: initial generation value for the context
1790 * @cidx: consumer pointer
1792 * Initialize an SGE egress context and make it ready for use. If the
1793 * platform allows concurrent context operations, the caller is
1794 * responsible for appropriate locking.
1796 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1797 enum sge_context_type type, int respq, u64 base_addr,
1798 unsigned int size, unsigned int token, int gen,
1801 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1803 if (base_addr & 0xfff) /* must be 4K aligned */
1805 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1810 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1811 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1812 V_EC_BASE_LO((u32)base_addr & 0xffff));
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
1816 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1817 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
1818 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1820 return t3_sge_write_context(adapter, id, F_EGRESS);
1824 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1825 * @adapter: the adapter to configure
1826 * @id: the context id
1827 * @gts_enable: whether to enable GTS for the context
1828 * @base_addr: base address of queue
1829 * @size: number of queue entries
1830 * @bsize: size of each buffer for this queue
1831 * @cong_thres: threshold to signal congestion to upstream producers
1832 * @gen: initial generation value for the context
1833 * @cidx: consumer pointer
1835 * Initialize an SGE free list context and make it ready for use. The
1836 * caller is responsible for ensuring only one context operation occurs
1839 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1840 u64 base_addr, unsigned int size, unsigned int bsize,
1841 unsigned int cong_thres, int gen, unsigned int cidx)
1843 if (base_addr & 0xfff) /* must be 4K aligned */
1845 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1849 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
1851 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1852 V_FL_BASE_HI((u32)base_addr) |
1853 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1854 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1855 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1856 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1857 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1858 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1859 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1860 return t3_sge_write_context(adapter, id, F_FREELIST);
1864 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1865 * @adapter: the adapter to configure
1866 * @id: the context id
1867 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1868 * @base_addr: base address of queue
1869 * @size: number of queue entries
1870 * @fl_thres: threshold for selecting the normal or jumbo free list
1871 * @gen: initial generation value for the context
1872 * @cidx: consumer pointer
1874 * Initialize an SGE response queue context and make it ready for use.
1875 * The caller is responsible for ensuring only one context operation
1878 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
1879 u64 base_addr, unsigned int size,
1880 unsigned int fl_thres, int gen, unsigned int cidx)
1882 unsigned int intr = 0;
1884 if (base_addr & 0xfff) /* must be 4K aligned */
1886 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1890 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1892 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
1894 if (irq_vec_idx >= 0)
1895 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1896 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1897 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
1898 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1899 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1903 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1904 * @adapter: the adapter to configure
1905 * @id: the context id
1906 * @base_addr: base address of queue
1907 * @size: number of queue entries
1908 * @rspq: response queue for async notifications
1909 * @ovfl_mode: CQ overflow mode
1910 * @credits: completion queue credits
1911 * @credit_thres: the credit threshold
1913 * Initialize an SGE completion queue context and make it ready for use.
1914 * The caller is responsible for ensuring only one context operation
1917 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
1918 unsigned int size, int rspq, int ovfl_mode,
1919 unsigned int credits, unsigned int credit_thres)
1921 if (base_addr & 0xfff) /* must be 4K aligned */
1923 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1928 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
1930 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1931 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
1932 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1933 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1934 V_CQ_CREDIT_THRES(credit_thres));
1935 return t3_sge_write_context(adapter, id, F_CQ);
1939 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1940 * @adapter: the adapter
1941 * @id: the egress context id
1942 * @enable: enable (1) or disable (0) the context
1944 * Enable or disable an SGE egress context. The caller is responsible for
1945 * ensuring only one context operation occurs at a time.
1947 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
1949 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1955 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1956 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1957 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1958 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1959 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1964 * t3_sge_disable_fl - disable an SGE free-buffer list
1965 * @adapter: the adapter
1966 * @id: the free list context id
1968 * Disable an SGE free-buffer list. The caller is responsible for
1969 * ensuring only one context operation occurs at a time.
1971 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
1973 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1979 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1981 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1982 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1983 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1988 * t3_sge_disable_rspcntxt - disable an SGE response queue
1989 * @adapter: the adapter
1990 * @id: the response queue context id
1992 * Disable an SGE response queue. The caller is responsible for
1993 * ensuring only one context operation occurs at a time.
1995 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
1997 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2005 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2006 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2007 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2012 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2013 * @adapter: the adapter
2014 * @id: the completion queue context id
2016 * Disable an SGE completion queue. The caller is responsible for
2017 * ensuring only one context operation occurs at a time.
2019 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2021 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2029 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2030 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2031 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2036 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2037 * @adapter: the adapter
2038 * @id: the context id
2039 * @op: the operation to perform
2041 * Perform the selected operation on an SGE completion queue context.
2042 * The caller is responsible for ensuring only one context operation
2045 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2046 unsigned int credits)
2050 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2053 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2054 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2055 V_CONTEXT(id) | F_CQ);
2056 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2060 if (op >= 2 && op < 7) {
2061 if (adapter->params.rev > 0)
2062 return G_CQ_INDEX(val);
2064 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2065 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2066 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2067 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2069 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2075 * t3_sge_read_context - read an SGE context
2076 * @type: the context type
2077 * @adapter: the adapter
2078 * @id: the context id
2079 * @data: holds the retrieved context
2081 * Read an SGE egress context. The caller is responsible for ensuring
2082 * only one context operation occurs at a time.
2084 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2085 unsigned int id, u32 data[4])
2087 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2090 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2091 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2092 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2095 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2096 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2097 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2098 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2103 * t3_sge_read_ecntxt - read an SGE egress context
2104 * @adapter: the adapter
2105 * @id: the context id
2106 * @data: holds the retrieved context
2108 * Read an SGE egress context. The caller is responsible for ensuring
2109 * only one context operation occurs at a time.
2111 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2115 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2119 * t3_sge_read_cq - read an SGE CQ context
2120 * @adapter: the adapter
2121 * @id: the context id
2122 * @data: holds the retrieved context
2124 * Read an SGE CQ context. The caller is responsible for ensuring
2125 * only one context operation occurs at a time.
2127 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2131 return t3_sge_read_context(F_CQ, adapter, id, data);
2135 * t3_sge_read_fl - read an SGE free-list context
2136 * @adapter: the adapter
2137 * @id: the context id
2138 * @data: holds the retrieved context
2140 * Read an SGE free-list context. The caller is responsible for ensuring
2141 * only one context operation occurs at a time.
2143 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2145 if (id >= SGE_QSETS * 2)
2147 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2151 * t3_sge_read_rspq - read an SGE response queue context
2152 * @adapter: the adapter
2153 * @id: the context id
2154 * @data: holds the retrieved context
2156 * Read an SGE response queue context. The caller is responsible for
2157 * ensuring only one context operation occurs at a time.
2159 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2161 if (id >= SGE_QSETS)
2163 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2167 * t3_config_rss - configure Rx packet steering
2168 * @adapter: the adapter
2169 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2170 * @cpus: values for the CPU lookup table (0xff terminated)
2171 * @rspq: values for the response queue lookup table (0xffff terminated)
2173 * Programs the receive packet steering logic. @cpus and @rspq provide
2174 * the values for the CPU and response queue lookup tables. If they
2175 * provide fewer values than the size of the tables the supplied values
2176 * are used repeatedly until the tables are fully populated.
/*
 * Program the RSS CPU lookup table, the response-queue map table, and finally
 * the global RSS config register.  The 0xff / 0xffff sentinels mark the end of
 * the caller-supplied value lists; values wrap so short lists fill the tables.
 */
2178 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2181 	int i, j, cpu_idx = 0, q_idx = 0;
/* Each LKP entry packs two 6-bit CPU values, one per byte lane. */
2184 	for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2187 		for (j = 0; j < 2; ++j) {
2188 			val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2189 			if (cpus[cpu_idx] == 0xff)
2192 		t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* MAP entries: table index in the high halfword, rspq value in the low. */
2196 	for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2197 		t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2198 			     (i << 16) | rspq[q_idx++]);
2199 		if (rspq[q_idx] == 0xffff)
/* Commit the steering mode last so the tables are in place when RSS goes live. */
2203 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2207 * t3_read_rss - read the contents of the RSS tables
2208 * @adapter: the adapter
2209 * @lkup: holds the contents of the RSS lookup table
2210 * @map: holds the contents of the RSS map table
2212 * Reads the contents of the receive packet steering tables.
/*
 * Read back both RSS tables.  A write of the index with the read-request bit
 * triggers the access; bit 31 of the readback indicates the data is valid.
 */
2214 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2220 	for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2221 		t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2223 		val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
/* Valid bit clear -> HW did not complete the lookup; error path is elided here. */
2224 		if (!(val & 0x80000000))
2227 		*lkup++ = (u8)(val >> 8);
2231 	for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2232 		t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2234 		val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2235 		if (!(val & 0x80000000))
2243 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2244 * @adap: the adapter
2245 * @enable: 1 to select offload mode, 0 for regular NIC
2247 * Switches TP to NIC/offload mode.
/*
 * Flip TP between NIC and offload mode.  The guard skips the write when a
 * non-offload-capable adapter asks to enable offload mode.
 */
2249 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2251 	if (is_offload(adap) || !enable)
2252 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2253 				 V_NICMODE(!enable));
2257 * pm_num_pages - calculate the number of pages of the payload memory
2258 * @mem_size: the size of the payload memory
2259 * @pg_size: the size of each payload memory page
2261 * Calculate the number of pages, each of the given size, that fit in a
2262 * memory of the specified size, respecting the HW requirement that the
2263 * number of pages must be a multiple of 24.
/* Number of pg_size pages fitting in mem_size, rounded down to a multiple of
 * 24 (HW requirement) — the rounding lines are elided from this excerpt. */
2265 static inline unsigned int pm_num_pages(unsigned int mem_size,
2266 				        unsigned int pg_size)
2268 	unsigned int n = mem_size / pg_size;
/* Program a memory-region base register and advance the running cursor
 * (the cursor-advance line of this multi-statement macro is elided here). */
2273 #define mem_region(adap, start, size, reg) \
2274 	t3_write_reg((adap), A_ ## reg, (start)); \
2278 * partition_mem - partition memory and configure TP memory settings
2279 * @adap: the adapter
2280 * @p: the TP parameters
2282 * Partitions context and payload memory and configures TP's memory
/*
 * Carve up context memory (CM) and payload memory (PM) and tell TP/SGE/CIM
 * where everything lives.  Layout order: TCBs, SGE egress/CQ contexts, timers,
 * pstructs + free lists, then the CIM SDRAM region (4KB aligned).  Finally the
 * server-TCAM count is adjusted to match the TIDs that actually fit in CM.
 */
2285 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2287 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2288 	unsigned int timers = 0, timers_shift = 22;
/* Rev > 0 parts scale the timer region with the TID count; the per-tier
 * timers/timers_shift assignments are elided from this excerpt. */
2290 	if (adap->params.rev > 0) {
2291 		if (tids <= 16 * 1024) {
2294 		} else if (tids <= 64 * 1024) {
2297 		} else if (tids <= 256 * 1024) {
2303 	t3_write_reg(adap, A_TP_PMM_SIZE,
2304 		     p->chan_rx_size | (p->chan_tx_size >> 16));
/* Tx payload-memory geometry. */
2306 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2307 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2308 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2309 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2310 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
/* Rx payload-memory geometry. */
2312 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2313 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2314 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2316 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2317 	/* Add a bit of headroom and make multiple of 24 */
2319 	pstructs -= pstructs % 24;
2320 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* CM layout: cursor m starts after the TCB array. */
2322 	m = tids * TCB_SIZE;
2323 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2324 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2325 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2326 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2327 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2328 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2329 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2330 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Whatever is left, page-aligned, becomes CIM SDRAM. */
2332 	m = (m + 4095) & ~0xfff;
2333 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2334 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Re-derive how many connections CM can really hold and fold any TCAM
 * surplus into extra server entries. */
2336 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2337 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2338 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2340 		adap->params.mc5.nservers += m - tids;
/* Indirect TP register write: address via TP_PIO_ADDR, value via TP_PIO_DATA. */
2343 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2345 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2346 	t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * One-time TP (Transport Processor) configuration: checksum offloads, TCP
 * option defaults, delayed-ACK policy, IPv6/NIC mode, pacing, and the Tx
 * modulation-queue weights.  Some settings differ between rev 0 (T3A) and
 * later silicon.
 */
2349 static void tp_config(adapter_t *adap, const struct tp_params *p)
2351 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2352 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2353 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
/* TCP option defaults: 576-byte fallback MTU, window scaling, timestamps, SACK. */
2354 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2355 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2356 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2357 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2358 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2359 		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2360 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2361 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2362 			 F_IPV6ENABLE | F_NICMODE);
2363 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2364 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
/* Rev 0 parts use the T3A variant of the ESND enable bit. */
2365 	t3_set_reg_field(adap, A_TP_PARA_REG6,
2366 			 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2368 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2369 			 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2370 			 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2371 			 F_RXCONGESTIONMODE);
2372 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
/* Post-rev-0 extras: egress rewrite, strict auto-pacing, TID locking; rev 0
 * falls back to fixed pacing (the else-branch structure is partly elided). */
2374 	if (adap->params.rev > 0) {
2375 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2376 		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2378 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2379 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2381 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
/* Equal weights across all Tx modulation queues/channels. */
2383 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2384 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2385 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2388 /* Desired TP timer resolution in usec */
2389 #define TP_TMR_RES 50
2391 /* TCP timer values in ms */
2392 #define TP_DACK_TIMER 50
2393 #define TP_RTO_MIN 250
2396 * tp_set_timers - set TP timing parameters
2397 * @adap: the adapter to set
2398 * @core_clk: the core clock frequency in Hz
2400 * Set TP's timing parameters, such as the various timer resolutions and
2401 * the TCP timer values.
/*
 * Derive TP timer resolutions from the core clock and program the TCP timer
 * registers.  Resolutions are log2 clock dividers (hence the fls() math);
 * the SECONDS macro then converts wall-clock seconds into timer ticks.
 */
2403 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2405 	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2406 	unsigned int dack_re = fls(core_clk / 5000) - 1;    /* 200us */
2407 	unsigned int tstamp_re = fls(core_clk / 1000);      /* 1ms, at least */
2408 	unsigned int tps = core_clk >> tre;
2410 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2411 		     V_DELAYEDACKRESOLUTION(dack_re) |
2412 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2413 	t3_write_reg(adap, A_TP_DACK_TIMER,
2414 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff shift table, 16 entries packed 4 per register. */
2415 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2416 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2417 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2418 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2419 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2420 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2421 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* "n SECONDS" expands to "n * tps" (ticks per second) — a readability macro. */
2424 #define SECONDS * tps
2426 	t3_write_reg(adap, A_TP_MSL,
2427 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
2428 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2429 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2430 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2431 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2432 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2433 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2434 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2435 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2440 #ifdef CONFIG_CHELSIO_T3_CORE
2442 * t3_tp_set_coalescing_size - set receive coalescing size
2443 * @adap: the adapter
2444 * @size: the receive coalescing size
2445 * @psh: whether a set PSH bit should deliver coalesced data
2447 * Set the receive coalescing size and PSH bit handling.
/*
 * Set the Rx coalescing size (bounded by MAX_RX_COALESCING_LEN) and whether a
 * set PSH bit flushes coalesced data.  A size of 0 leaves coalescing disabled
 * (the enable bits are cleared first and only set on the elided size>0 path).
 */
2449 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2453 	if (size > MAX_RX_COALESCING_LEN)
2456 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2457 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2460 		val |= F_RXCOALESCEENABLE;
2462 			val |= F_RXCOALESCEPSHEN;
2463 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2464 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2466 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2471 * t3_tp_set_max_rxsize - set the max receive size
2472 * @adap: the adapter
2473 * @size: the max receive size
2475 * Set TP's max receive size. This is the limit that applies when
2476 * receive coalescing is disabled.
/* Cap the per-transfer Rx size for both PM transfer-length fields; this limit
 * applies when Rx coalescing is disabled. */
2478 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2480 	t3_write_reg(adap, A_TP_PARA_REG7,
2481 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table (table contents elided from this excerpt). */
2484 static void __devinit init_mtus(unsigned short mtus[])
	/*
2487 	 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2488 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2489 	 * are enabled and still have at least 8 bytes of payload.
	 */
2510 * Initial congestion control parameters.
/* Fill the default congestion-control alpha (a) and beta (b) window tables;
 * several index ranges are elided from this excerpt. */
2512 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2514 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2539 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2542 	b[13] = b[14] = b[15] = b[16] = 3;
2543 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2544 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2549 /* The minimum additive increment value for the congestion control table */
2550 #define CC_MIN_INCR 2U
2553 * t3_load_mtus - write the MTU and congestion control HW tables
2554 * @adap: the adapter
2555 * @mtus: the unrestricted values for the MTU table
2556 * @alphs: the values for the congestion control alpha parameter
2557 * @beta: the values for the congestion control beta parameter
2558 * @mtu_cap: the maximum permitted effective MTU
2560 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2561 * Update the high-speed congestion control table with the supplied alpha,
/*
 * Write the HW MTU table (each entry capped at mtu_cap) and, per MTU and
 * congestion window, the additive-increment entries of the congestion-control
 * table derived from alpha/beta and the expected packets-in-flight count.
 */
2564 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2565 		  unsigned short alpha[NCCTRL_WIN],
2566 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Expected average packets per congestion window, indexed by window. */
2568 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2569 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2570 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2571 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2575 	for (i = 0; i < NMTUS; ++i) {
2576 		unsigned int mtu = min(mtus[i], mtu_cap);
2577 		unsigned int log2 = fls(mtu);
/* Round log2 up when mtu is closer to the next power of two. */
2579 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2581 		t3_write_reg(adap, A_TP_MTU_TABLE,
2582 			     (i << 24) | (log2 << 16) | mtu);
2584 		for (w = 0; w < NCCTRL_WIN; ++w) {
/* 40 = TCP/IP header overhead; increment floored at CC_MIN_INCR (elided). */
2587 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2590 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2591 				     (w << 16) | (beta[w] << 13) | inc);
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2601 * Reads the HW MTU table.
/* Read the MTU table back: write 0xff000000|index to select an entry, then
 * read the same register; MTU occupies the low 14 bits. */
2603 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2607 	for (i = 0; i < NMTUS; ++i) {
2610 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 		mtus[i] = val & 0x3fff;
2617 * t3_get_cong_cntl_tab - reads the congestion control table
2618 * @adap: the adapter
2619 * @incr: where to store the alpha values
2621 * Reads the additive increments programmed into the HW congestion
/* Read back the additive increments from the HW congestion-control table,
 * one entry per (MTU index, congestion window) pair; 13-bit values. */
2624 void t3_get_cong_cntl_tab(adapter_t *adap,
2625 			  unsigned short incr[NMTUS][NCCTRL_WIN])
2627 	unsigned int mtu, w;
2629 	for (mtu = 0; mtu < NMTUS; ++mtu)
2630 		for (w = 0; w < NCCTRL_WIN; ++w) {
/* 0xffff0000 selects a read; (mtu<<5)|w addresses the entry. */
2631 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
2632 				     0xffff0000 | (mtu << 5) | w);
2633 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2634 				       A_TP_CCTRL_TABLE) & 0x1fff;
2639 * t3_tp_get_mib_stats - read TP's MIB counters
2640 * @adap: the adapter
2641 * @tps: holds the returned counter values
2643 * Returns the values of TP's MIB counters.
/* Bulk-read TP's MIB counters into *tps via the indirect index/data pair. */
2645 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
2647 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
2648 			 sizeof(*tps) / sizeof(u32), 0);
/* Program a ULP_RX region's lower/upper limits and advance the cursor
 * (the cursor-advance statement of ulp_region is elided here). */
2651 #define ulp_region(adap, name, start, len) \
2652 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2653 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2654 		     (start) + (len) - 1); \
/* Same for a ULP_TX region; note this variant does not advance the cursor. */
2657 #define ulptx_region(adap, name, start, len) \
2658 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2659 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2660 		     (start) + (len) - 1)
/*
 * Lay out the ULP regions (iSCSI, TDDP, TPT, STAG, RQ, PBL) inside the Rx
 * payload memory, starting after the per-channel Rx area, then open the TDDP
 * tag mask fully.  ulptx_region reuses the cursor left by the preceding
 * ulp_region calls, so ULP_RX and ULP_TX PBL limits end up matching.
 */
2662 static void ulp_config(adapter_t *adap, const struct tp_params *p)
2664 	unsigned int m = p->chan_rx_size;
2666 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2667 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2668 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2669 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2670 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2671 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2672 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2673 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
/*
 * Program one of the two packet-trace filters.  The 5-tuple plus VLAN and
 * interface are packed into four key words with matching mask words, then
 * written to consecutive indirect TP registers (key/mask interleaved).
 * filter_index selects the Rx (non-zero) or Tx (zero) key block.
 */
2677 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
2678 			    int filter_index, int invert, int enable)
2680 	u32 addr, key[4], mask[4];
2682 	key[0] = tp->sport | (tp->sip << 16);
2683 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
/* key[2] (dip) assignment is elided from this excerpt. */
2685 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2687 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2688 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2689 	mask[2] = tp->dip_mask;
2690 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Bits 29/28 of key[3] carry the enable/invert flags (guards elided). */
2693 		key[3] |= (1 << 29);
2695 		key[3] |= (1 << 28);
2697 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2698 	tp_wr_indirect(adapter, addr++, key[0]);
2699 	tp_wr_indirect(adapter, addr++, mask[0]);
2700 	tp_wr_indirect(adapter, addr++, key[1]);
2701 	tp_wr_indirect(adapter, addr++, mask[1]);
2702 	tp_wr_indirect(adapter, addr++, key[2]);
2703 	tp_wr_indirect(adapter, addr++, mask[2]);
2704 	tp_wr_indirect(adapter, addr++, key[3]);
2705 	tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted indirect writes to the device. */
2706 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
2710 * t3_config_sched - configure a HW traffic scheduler
2711 * @adap: the adapter
2712 * @kbps: target rate in Kbps
2713 * @sched: the scheduler index
2715 * Configure a HW scheduler for the target rate
/*
 * Configure a HW Tx traffic scheduler for a target rate in Kbps.  Searches
 * all clocks-per-tick (cpt) / bytes-per-tick (bpt) combinations for the one
 * whose achievable rate is closest to the request, then writes the selected
 * pair into the scheduler's half of the rate-limit register.
 */
2717 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
2719 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2720 	unsigned int clk = adap->params.vpd.cclk * 1000;
2721 	unsigned int selected_cpt = 0, selected_bpt = 0;
2724 	kbps *= 125;     /* -> bytes */
2725 	for (cpt = 1; cpt <= 255; cpt++) {
2727 		bpt = (kbps + tps / 2) / tps;
2728 		if (bpt > 0 && bpt <= 255) {
2730 			delta = v >= kbps ? v - kbps : kbps - v;
2731 			if (delta <= mindelta) {
/* Once a candidate exists, a worse delta means we've passed the optimum. */
2736 		} else if (selected_cpt)
/* Two schedulers share each rate-limit register; sched/2 picks the register,
 * sched parity picks the half (high vs low 16 bits). */
2742 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2743 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2744 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2746 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2748 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2749 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * Bring up TP: enable VLAN acceleration, and on offload-capable adapters
 * program the timers and wait for the free-list initialization to finish.
 * Non-offload parts just get a TP reset.
 */
2753 static int tp_init(adapter_t *adap, const struct tp_params *p)
2758 	t3_set_vlan_accel(adap, 3, 0);
2760 	if (is_offload(adap)) {
2761 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2762 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2763 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2766 			CH_ERR(adap, "TP initialization timed out\n");
2770 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Activate the ports named in port_mask; reject bits beyond the adapter's
 * port count (the error-return line is elided from this excerpt). */
2774 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
2776 	if (port_mask & ~((1 << adap->params.nports) - 1))
2778 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 			 port_mask << S_PORT0ACTIVE);
2784 * Perform the bits of HW initialization that are dependent on the number
2785 * of available ports.
/*
 * Port-count-dependent HW setup.  Single-port: disable ULP round-robin
 * arbitration and give all PM bandwidth to port 0.  Two-port: enable
 * round-robin, equal DMA weights, both ports active, split PM, and map the
 * Tx modulation queues across both channels.
 */
2787 static void init_hw_for_avail_ports(adapter_t *adap, int nports)
2792 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2793 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2794 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2795 			     F_PORT0ACTIVE | F_ENFORCEPKT);
2796 		t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
/* Two-port branch (the if/else structure is partly elided here). */
2798 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2799 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2800 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2801 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2802 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2803 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2805 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2806 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2807 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2808 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2809 		for (i = 0; i < 16; i++)
2810 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2811 				     (i << 16) | 0x1010);
/*
 * Impedance calibration for the XGMAC analog interface.  XAUI parts retry the
 * HW calibration up to 5 times and copy the measured value back; RGMII parts
 * get fixed pull-up/pull-down settings.
 */
2815 static int calibrate_xgm(adapter_t *adapter)
2817 	if (uses_xaui(adapter)) {
2820 		for (i = 0; i < 5; ++i) {
2821 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* Read back to flush the posted write before polling. */
2822 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
2824 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2826 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
2827 					     V_XAUIIMP(G_CALIMP(v) >> 2));
2831 		CH_ERR(adapter, "MAC calibration failed\n");
/* RGMII path: fixed impedance settings, latched by IMPSETUPDATE. */
2834 	t3_write_reg(adapter, A_XGM_RGMII_IMP,
2835 		     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2836 	t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2837 			 F_XGM_IMPSETUPDATE);
/* T3B-revision RGMII impedance calibration: pulse CALRESET, IMPSETUPDATE and
 * CALUPDATE in the documented order (set-then-clear / clear-then-set pairs). */
2842 static void calibrate_xgm_t3b(adapter_t *adapter)
2844 	if (!uses_xaui(adapter)) {
2845 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2846 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2847 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2848 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2849 				 F_XGM_IMPSETUPDATE);
2850 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2852 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2853 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DDR timing parameters for one MC7 memory controller speed grade.  RefCyc is
 * indexed by memory density (see mc7_init's use of G_DEN). */
2857 struct mc7_timing_params {
2858 	unsigned char ActToPreDly;
2859 	unsigned char ActToRdWrDly;
2860 	unsigned char PreCyc;
2861 	unsigned char RefCyc[5];
2862 	unsigned char BkCyc;
2863 	unsigned char WrToRdDly;
2864 	unsigned char RdToWrDly;
2868 * Write a value to a register and check that the write completed. These
2869 * writes normally complete in a cycle or two, so one read should suffice.
2870 * The very first read exists to flush the posted write to the device.
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 * Returns non-zero (error path elided here) if F_BUSY is still set.
 */
2872 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
2874 	t3_write_reg(adapter, addr, val);
2875 	(void) t3_read_reg(adapter, addr);     /* flush */
2876 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
2878 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Initialize one MC7 memory controller: read geometry from MC7_CFG, run
 * calibration, program DRAM timing for the given mem_type, step through the
 * JEDEC mode-register init sequence, enable periodic refresh and ECC, then
 * run BIST over the whole array and finally mark the controller ready.
 */
2882 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* DRAM mode-register values, indexed by mem_type like mc7_timings below. */
2884 	static const unsigned int mc7_mode[] = {
2885 		0x632, 0x642, 0x652, 0x432, 0x442
2887 	static const struct mc7_timing_params mc7_timings[] = {
2888 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
2889 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
2890 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
2891 		{ 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
2892 		{ 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
2896 	unsigned int width, density, slow, attempts;
2897 	adapter_t *adapter = mc7->adapter;
2898 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Geometry comes from strapping bits latched into MC7_CFG. */
2900 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2901 	slow = val & F_SLOW;
2902 	width = G_WIDTH(val);
2903 	density = G_DEN(val);
2905 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2906 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);     /* flush */
/* Single-shot calibration; BUSY/FAULT still set afterwards means failure. */
2910 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2911 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2913 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2914 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2915 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* DRAM timing from the selected speed-grade row; RefCyc picked by density. */
2921 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2922 		     V_ACTTOPREDLY(p->ActToPreDly) |
2923 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2924 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2925 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2927 		t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2928 			     val | F_CLKEN | F_TERM150);
2929 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2932 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* JEDEC init: precharge, extended mode registers, mode register (each via
 * wrreg_wait so a stuck BUSY bit aborts the sequence). */
2937 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2938 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2939 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2940 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2944 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2945 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
2950 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2951 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2952 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2953 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2954 		       mc7_mode[mem_type]) ||
2955 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2956 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
/* Convert the refresh interval into controller clock units. */
2959 	/* clock value is in KHz */
2960 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;     /* ns */
2961 	mc7_clock /= 1000000;                             /* KHz->MHz, ns->us */
2963 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2964 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
2965 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF);    /* flush */
2967 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
2968 		     F_ECCGENEN | F_ECCCHKEN);
/* BIST sweep over the full address range (size scaled by bus width). */
2969 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2970 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2971 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2972 		     (mc7->size << width) - 1);
2973 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2974 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2979 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2980 	} while ((val & F_BUSY) && --attempts);
2982 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2986 	/* Enable normal memory accesses. */
2987 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune the PCIe ack latency and replay timer for the negotiated link width
 * and max payload size.  The lookup tables are indexed [log2(link width)]
 * [payload-size code]; fast-training sequence counts are added on top.
 * Also clears any latched PEX errors and enables CLIDECEN.
 */
2994 static void config_pcie(adapter_t *adap)
2996 	static const u16 ack_lat[4][6] = {
2997 		{ 237, 416, 559, 1071, 2095, 4143 },
2998 		{ 128, 217, 289, 545, 1057, 2081 },
2999 		{ 73, 118, 154, 282, 538, 1050 },
3000 		{ 67, 107, 86, 150, 278, 534 }
3002 	static const u16 rpl_tmr[4][6] = {
3003 		{ 711, 1248, 1677, 3213, 6285, 12429 },
3004 		{ 384, 651, 867, 1635, 3171, 6243 },
3005 		{ 219, 354, 462, 846, 1614, 3150 },
3006 		{ 201, 321, 258, 450, 834, 1602 }
3010 	unsigned int log2_width, pldsize;
3011 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Max payload size from the PCIe Device Control register (bits 7:5). */
3013 	t3_os_pci_read_config_2(adap,
3014 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3016 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3018 	t3_os_pci_read_config_2(adap,
3019 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* Rev 0 lacks the separate Rx FTS count register; reuse the Tx value. */
3022 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3023 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3024 		     G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3025 	log2_width = fls(adap->params.pci.width) - 1;
3026 	acklat = ack_lat[log2_width][pldsize];
3027 	if (val & 1)                       /* check LOsEnable */
3028 		acklat += fst_trn_tx * 4;
3029 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* Rev 0 uses the T3A field layout for ACKLAT. */
3031 	if (adap->params.rev == 0)
3032 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3033 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3034 				 V_T3A_ACKLAT(acklat));
3036 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3039 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3040 			 V_REPLAYLMT(rpllmt));
3042 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3043 	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3047 * Initialize and configure T3 HW modules. This performs the
3048 * initialization steps that need to be done once after a card is reset.
3049 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3051 * fw_params are passed to FW and their value is platform dependent. Only the
3052 * top 8 bits are available for use, the rest must be 0.
/*
 * Full post-reset HW bring-up: XGM calibration, memory partitioning, the
 * three MC7 controllers plus the MC5 TCAM, TP, ULP, PCI(e) tuning, SGE, and
 * finally the firmware boot handshake with the embedded uP.  Returns 0 on
 * success or a negative errno (default -EIO; error paths partly elided).
 */
3054 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3056 	int err = -EIO, attempts = 100;
3057 	const struct vpd_params *vpd = &adapter->params.vpd;
/* Rev > 0 parts use the T3B calibration procedure. */
3059 	if (adapter->params.rev > 0)
3060 		calibrate_xgm_t3b(adapter);
3061 	else if (calibrate_xgm(adapter))
3065 	partition_mem(adapter, &adapter->params.tp);
/* Bring up PMRX, PMTX, CM memories and the MC5 lookup engine. */
3067 	if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3068 	    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3069 	    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3070 	    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3071 			adapter->params.mc5.nfilters,
3072 			adapter->params.mc5.nroutes))
3076 	if (tp_init(adapter, &adapter->params.tp))
3079 #ifdef CONFIG_CHELSIO_T3_CORE
3080 	t3_tp_set_coalescing_size(adapter,
3081 				  min(adapter->params.sge.max_pkt_size,
3082 				      MAX_RX_COALESCING_LEN), 1);
3083 	t3_tp_set_max_rxsize(adapter,
3084 			     min(adapter->params.sge.max_pkt_size, 16384U));
3085 	ulp_config(adapter, &adapter->params.tp);
/* Bus-specific tuning: PCIe gets ack/replay tuning, PCI-X gets CLIDECEN. */
3087 	if (is_pcie(adapter))
3088 		config_pcie(adapter);
3090 		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3092 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3093 	init_hw_for_avail_ports(adapter, adapter->params.nports);
3094 	t3_sge_init(adapter, &adapter->params.sge);
/* Hand fw_params to the uP and kick the firmware boot from flash. */
3096 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3097 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3098 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3099 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);     /* flush */
3101 	do {                   /* wait for uP to initialize */
3103 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3113 * get_pci_mode - determine a card's PCI mode
3114 * @adapter: the adapter
3115 * @p: where to store the PCI settings
3117 * Determines a card's PCI mode and associated parameters, such as speed
/*
 * Detect whether the card sits on PCIe or PCI/PCI-X and record speed, width
 * and variant.  PCIe is identified by the presence of the Express capability;
 * otherwise the PCIX_MODE register's init pattern distinguishes plain PCI
 * from the PCI-X mode-1/mode-2 variants.
 */
3120 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3122 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3123 	u32 pci_mode, pcie_cap;
3125 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3129 		p->variant = PCI_VARIANT_PCIE;
3130 		p->pcie_cap_addr = pcie_cap;
/* Negotiated link width from Link Status bits 9:4. */
3131 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3133 		p->width = (val >> 4) & 0x3f;
3137 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3138 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3139 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3140 	pci_mode = G_PCIXINITPAT(pci_mode);
3142 		p->variant = PCI_VARIANT_PCI;
3143 	else if (pci_mode < 4)
3144 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3145 	else if (pci_mode < 8)
3146 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3148 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3152 * init_link_config - initialize a link's SW state
3153 * @lc: structure holding the link state
3154 * @ai: information about the current card
3156 * Initializes the SW state maintained for each link, including the link's
3157 * capabilities and default speed/duplex/flow-control/autonegotiation
/*
 * Seed a link's SW state from its capability mask: speed/duplex start
 * invalid, flow control defaults to Rx+Tx pause, and autoneg is enabled
 * (advertising everything supported) only if the PHY supports it.
 */
3160 static void __devinit init_link_config(struct link_config *lc,
3163 	lc->supported = caps;
3164 	lc->requested_speed = lc->speed = SPEED_INVALID;
3165 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3166 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3167 	if (lc->supported & SUPPORTED_Autoneg) {
3168 		lc->advertising = lc->supported;
3169 		lc->autoneg = AUTONEG_ENABLE;
3170 		lc->requested_fc |= PAUSE_AUTONEG;
3172 		lc->advertising = 0;
3173 		lc->autoneg = AUTONEG_DISABLE;
3178 * mc7_calc_size - calculate MC7 memory size
3179 * @cfg: the MC7 configuration
3181 * Calculates the size of an MC7 memory in bytes from the value of its
3182 * configuration register.
/*
 * Decode an MC7_CFG value into a memory size: density sets the base MBs,
 * doubled per bank, divided by organization and bus-width factors.  The
 * final bytes conversion line is elided from this excerpt.
 */
3184 static unsigned int __devinit mc7_calc_size(u32 cfg)
3186 	unsigned int width = G_WIDTH(cfg);
3187 	unsigned int banks = !!(cfg & F_BKS) + 1;
3188 	unsigned int org = !!(cfg & F_ORG) + 1;
3189 	unsigned int density = G_DEN(cfg);
3190 	unsigned int MBs = ((256 << density) * banks) / (org << width);
/* Initialize a struct mc7's SW state: adapter back-pointer, register offset
 * relative to the PMRX instance, and size/width decoded from MC7_CFG. */
3195 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3196 			       unsigned int base_addr, const char *name)
3200 	mc7->adapter = adapter;
3202 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3203 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3204 	mc7->size = mc7_calc_size(cfg);
3205 	mc7->width = G_WIDTH(cfg);
/*
 * Initialize a cmac's SW state for MAC instance 'index' and, on rev 0 XAUI
 * parts, apply the serdes workaround settings (10G vs 1G variants).
 */
3208 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3210 	mac->adapter = adapter;
/* Per-MAC register stride times the instance index. */
3211 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3214 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3215 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3216 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3217 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/*
 * Early (pre-port) HW setup: MDIO, the I2C clock divider, GPIO outputs, and
 * for rev 0 XAUI parts a clock-divider reset pulse on both XGM port-config
 * registers (each write flushed by a readback).
 */
3222 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3224 	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3226 	mi1_init(adapter, ai);
3227 	t3_write_reg(adapter, A_I2C_CFG,                /* set for 80KHz */
3228 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3229 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3230 		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
/* Only rev 0 XAUI parts need the clock-divider reset below. */
3232 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3235 	/* Enable MAC clocks so we can access the registers */
3236 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3237 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3239 	val |= F_CLKDIVRESET_;
3240 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3241 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3242 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3243 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3247 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
/*
 * Warm-reset the adapter via PL_RST.  PCIe cards lose their config space
 * during reset, so it is saved first and restored afterwards.  The device is
 * polled (by re-reading the vendor ID at config offset 0) until it responds
 * again; 0x1425 is Chelsio's PCI vendor ID.
 */
3250 int t3_reset_adapter(adapter_t *adapter)
3255 	if (is_pcie(adapter))
3256 		t3_os_pci_save_state(adapter);
3257 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
	/*
3260 	 * Delay. Give Some time to device to reset fully.
3261 	 * XXX The delay time should be modified.
	 */
3263 	for (i = 0; i < 10; i++) {
3265 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
3266 		if (devid == 0x1425)
3270 	if (devid != 0x1425)
3273 	if (is_pcie(adapter))
3274 		t3_os_pci_restore_state(adapter);
3279 * Initialize adapter SW state for the various HW modules, set initial values
3280 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
/*
 * One-time SW/adapter preparation: read PCI mode and VPD, optionally reset,
 * size the MC7/MC5 memories and derive the TP memory parameters, then set up
 * per-port PHY/MAC state, Ethernet addresses, and link configuration.
 * Returns 0 on success or the error from VPD/reset (paths partly elided).
 */
3283 int __devinit t3_prep_adapter(adapter_t *adapter,
3284 			      const struct adapter_info *ai, int reset)
3287 	unsigned int i, j = 0;
3289 	get_pci_mode(adapter, &adapter->params.pci);
3291 	adapter->params.info = ai;
3292 	adapter->params.nports = ai->nports;
3293 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3294 	adapter->params.linkpoll_period = 0;
/* 10G MACs overflow their stats counters faster, so accumulate more often. */
3295 	adapter->params.stats_update_period = is_10G(adapter) ?
3296 		MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3297 	adapter->params.pci.vpd_cap_addr =
3298 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3300 	ret = get_vpd_params(adapter, &adapter->params.vpd);
3304 	if (reset && t3_reset_adapter(adapter))
3307 	t3_sge_prep(adapter, &adapter->params.sge);
/* mclk == 0 in VPD means no external memory; skip the memory setup. */
3309 	if (adapter->params.vpd.mclk) {
3310 		struct tp_params *p = &adapter->params.tp;
3312 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3313 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3314 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3316 		p->nchan = ai->nports;
3317 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3318 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3319 		p->cm_size = t3_mc7_size(&adapter->cm);
3320 		p->chan_rx_size = p->pmrx_size / 2;  /* only 1 Rx channel */
3321 		p->chan_tx_size = p->pmtx_size / p->nchan;
3322 		p->rx_pg_size = 64 * 1024;
3323 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3324 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3325 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3326 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3327 			       adapter->params.rev > 0 ? 12 : 6;
3329 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3330 		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3331 					       DEFAULT_NFILTERS : 0;
3332 		adapter->params.mc5.nroutes = 0;
3333 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3335 #ifdef CONFIG_CHELSIO_T3_CORE
3336 		init_mtus(adapter->params.mtus);
3337 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3341 	early_hw_init(adapter, ai);
3343 	for_each_port(adapter, i) {
3345 		struct port_info *p = &adapter->port[i];
/* Skip VPD slots with no port type; j tracks the physical PHY index. */
3347 		while (!adapter->params.vpd.port_type[j])
3350 		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3351 		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3353 		mac_prep(&p->mac, adapter, j);
		/*
3357 		 * The VPD EEPROM stores the base Ethernet address for the
3358 		 * card. A port's address is derived from the base by adding
3359 		 * the port's index to the base's low octet.
		 */
3361 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3362 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3364 		t3_os_set_hw_addr(adapter, i, hw_addr);
3365 		init_link_config(&p->link_config, p->port_type->caps);
/* PHYs stay powered down until the port is actually brought up. */
3366 		p->phy.ops->power_down(&p->phy, 1);
/* No link interrupt capability -> fall back to 10-unit link polling. */
3367 		if (!(p->port_type->caps & SUPPORTED_IRQ))
3368 			adapter->params.linkpoll_period = 10;
3374 void t3_led_ready(adapter_t *adapter)
3376 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3380 void t3_port_failover(adapter_t *adapter, int port)
3384 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
3385 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3389 void t3_failover_done(adapter_t *adapter, int port)
3391 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3392 F_PORT0ACTIVE | F_PORT1ACTIVE);
3395 void t3_failover_clear(adapter_t *adapter)
3397 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3398 F_PORT0ACTIVE | F_PORT1ACTIVE);