1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <cxgb_include.h>
37 #include <dev/cxgb/cxgb_include.h>
41 #define msleep t3_os_sleep
44 * t3_wait_op_done_val - wait until an operation is completed
45 * @adapter: the adapter performing the operation
46 * @reg: the register to check for completion
47 * @mask: a single-bit field within @reg that indicates completion
48 * @polarity: the value of the field when the operation is completed
49 * @attempts: number of check iterations
50 * @delay: delay in usecs between iterations
51 * @valp: where to store the value of the register at completion time
53 * Wait until an operation is completed by checking a bit in a register
54 * up to @attempts times. If @valp is not NULL the value of the register
55 * at the time it indicated completion is stored there. Returns 0 if the
56 * operation completes and -EAGAIN otherwise.
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
		int attempts, int delay, u32 *valp)
	u32 val = t3_read_reg(adapter, reg);

	/* Done when the masked bit has reached the requested polarity. */
	if (!!(val & mask) == polarity) {
	/* NOTE(review): the rest of the poll loop (store to @valp, retry with
	 * @delay, -EAGAIN on exhausting @attempts) is elided in this extract;
	 * see the block comment above for the contract. */
77 * t3_write_regs - write a bunch of registers
78 * @adapter: the adapter to program
79 * @p: an array of register address/register value pairs
80 * @n: the number of address/value pairs
81 * @offset: register address offset
83 * Takes an array of register address/register value pairs and writes each
84 * value to the corresponding register. Register addresses are adjusted
85 * by the supplied offset.
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
	/* Write each value to its register address, adjusted by @offset. */
	t3_write_reg(adapter, p->reg_addr + offset, p->val);
97 * t3_set_reg_field - set a register field to a value
98 * @adapter: the adapter to program
99 * @addr: the register address
100 * @mask: specifies the portion of the register to modify
101 * @val: the new value for the register field
103 * Sets a register field specified by the supplied mask to the
106 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
108 u32 v = t3_read_reg(adapter, addr) & ~mask;
110 t3_write_reg(adapter, addr, v | val);
111 (void) t3_read_reg(adapter, addr); /* flush */
115 * t3_read_indirect - read indirectly addressed registers
117 * @addr_reg: register holding the indirect address
118 * @data_reg: register holding the value of the indirect register
119 * @vals: where the read register values are stored
120 * @start_idx: index of first indirect register to read
121 * @nregs: how many indirect registers to read
123 * Reads registers that are accessed indirectly through an address/data
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		unsigned int data_reg, u32 *vals, unsigned int nregs,
		unsigned int start_idx)
	/* For each register: latch its index into @addr_reg, then read the
	 * value back through @data_reg.  (The loop over @nregs is presumably
	 * around these two statements -- elided in this extract.) */
	t3_write_reg(adap, addr_reg, start_idx);
	*vals++ = t3_read_reg(adap, data_reg);
138 * t3_mc7_bd_read - read from MC7 through backdoor accesses
139 * @mc7: identifies MC7 to read from
140 * @start: index of first 64-bit word to read
141 * @n: number of 64-bit words to read
142 * @buf: where to store the read result
144 * Read n 64-bit words from MC7 starting at word start, using backdoor
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
	/* Per-width shift/step tables used to assemble 64-bit words out of
	 * the partial values returned by the backdoor DATA registers. */
	static int shift[] = { 0, 0, 16, 24 };
	static int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	/* Reject reads that fall outside the MC7. */
	if (start >= size64 || start + n > size64)

	/* Scale the 64-bit word index to a bus-width-dependent address. */
	start *= (8 << mc7->width);

	/* One backdoor access per partial word, widest chunk first. */
	for (i = (1 << mc7->width) - 1; i >= 0; --i) {
		/* Issue the read and busy-poll BD_OP until it completes. */
		t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
		t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
		val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
		while ((val & F_BUSY) && attempts--)
			val = t3_read_reg(adap,
				mc7->offset + A_MC7_BD_OP);

		val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
		if (mc7->width == 0) {
			/* Full-width MC7: DATA0/DATA1 hold the whole word. */
			val64 = t3_read_reg(adap,
				mc7->offset + A_MC7_BD_DATA0);
			val64 |= (u64)val << 32;
		/* Narrower widths: merge the partial word into place. */
		val >>= shift[mc7->width];
		val64 |= (u64)val << (step[mc7->width] * i);
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
	/* MDC divider: core clock / (2 * target MDC frequency) - 1. */
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	/* Conditional adjustment of val for non-10G boards -- the
	 * conditional's body is not visible in this extract. */
	if (!(ai->caps & SUPPORTED_10000baseT_Full))
	t3_write_reg(adap, A_MI1_CFG, val);
209 #define MDIO_ATTEMPTS 20
212 * MI1 read/write operations for direct-addressed PHYs.
static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
	/* Direct-addressed read: @mmd_addr is unused in this mode; the
	 * register number goes straight into MI1_ADDR. */
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2)); /* op 2 starts the read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int val)
	/* Direct-addressed write: @mmd_addr is unused in this mode. */
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); /* op 1 starts the write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
251 static struct mdio_ops mi1_mdio_ops = {
257 * MI1 read/write operations for indirect-addressed PHYs.
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
	/* Indirect addressing: MI1_ADDR selects PHY + MMD device while the
	 * target register number travels through MI1_DATA. */
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); /* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	/* Address cycle done -- issue the read (op 3) and fetch the data. */
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
	*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int val)
	/* Indirect write: address cycle (op 0) selecting the MMD register,
	 * followed by a write cycle (op 1) carrying the data. */
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
	MDIO_UNLOCK(adapter);
302 static struct mdio_ops mi1_mdio_ext_ops = {
308 * t3_mdio_change_bits - modify the value of a PHY register
309 * @phy: the PHY to operate on
310 * @mmd: the device address
311 * @reg: the register address
312 * @clear: what part of the register value to mask off
313 * @set: what part of the register value to set
315 * Changes the value of a PHY register by applying a mask to its current
316 * value and ORing the result with a new value.
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
	/* Read-modify-write of a PHY register.  NOTE(review): @clear is
	 * presumably masked off val between the read and the write -- that
	 * statement is elided in this extract. */
	ret = mdio_read(phy, mmd, reg, &val);
	ret = mdio_write(phy, mmd, reg, val | set);
333 * t3_phy_reset - reset a PHY block
334 * @phy: the PHY to operate on
335 * @mmd: the device address of the PHY block to reset
336 * @wait: how long to wait for the reset to complete in 1ms increments
338 * Resets a PHY block and optionally waits for the reset to complete.
339 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
	/* Kick off the reset: set BMCR_RESET while clearing power-down. */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	/* Poll BMCR until ctl clears or @wait iterations expire. */
	err = mdio_read(phy, mmd, MII_BMCR, &ctl);
	} while (ctl && --wait);
364 * t3_phy_advertise - set the PHY advertisement registers for autoneg
365 * @phy: the PHY to operate on
366 * @advert: bitmap of capabilities the PHY should advertise
368 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
369 * requested capabilities.
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
	unsigned int val = 0;

	/* Program the gigabit abilities in MII_CTRL1000, preserving the
	 * register's unrelated bits. */
	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;
	err = mdio_write(phy, 0, MII_CTRL1000, val);

	/* Build the 10/100 + pause advertisement for MII_ADVERTISE.
	 * NOTE(review): val's re-initialization between the two registers is
	 * not visible in this extract. */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
407 * t3_phy_advertise_fiber - set fiber PHY advertisement register
408 * @phy: the PHY to operate on
409 * @advert: bitmap of capabilities the PHY should advertise
411 * Sets a fiber PHY's advertisement register to advertise the
412 * requested capabilities.
414 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
416 unsigned int val = 0;
418 if (advert & ADVERTISED_1000baseT_Half)
419 val |= ADVERTISE_1000XHALF;
420 if (advert & ADVERTISED_1000baseT_Full)
421 val |= ADVERTISE_1000XFULL;
422 if (advert & ADVERTISED_Pause)
423 val |= ADVERTISE_1000XPAUSE;
424 if (advert & ADVERTISED_Asym_Pause)
425 val |= ADVERTISE_1000XPSE_ASYM;
426 return mdio_write(phy, 0, MII_ADVERTISE, val);
430 * t3_set_phy_speed_duplex - force PHY speed and duplex
431 * @phy: the PHY to operate on
432 * @speed: requested PHY speed
433 * @duplex: requested PHY duplex
435 * Force a 10/100/1000 PHY's speed and duplex. This also disables
436 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	/* Force speed: clear the speed-select and autoneg bits, then set the
	 * requested speed.  NOTE(review): the "if (speed >= 0)" guard implied
	 * by the kdoc is elided in this extract. */
	ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
	if (speed == SPEED_100)
		ctl |= BMCR_SPEED100;
	else if (speed == SPEED_1000)
		ctl |= BMCR_SPEED1000;
	/* Force duplex the same way. */
	ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
	if (duplex == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
464 static struct adapter_info t3_adap_info[] = {
466 F_GPIO2_OEN | F_GPIO4_OEN |
467 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
469 &mi1_mdio_ops, "Chelsio PE9000" },
471 F_GPIO2_OEN | F_GPIO4_OEN |
472 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
474 &mi1_mdio_ops, "Chelsio T302" },
476 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
477 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
478 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
479 &mi1_mdio_ext_ops, "Chelsio T310" },
481 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
482 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
483 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
484 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
485 &mi1_mdio_ext_ops, "Chelsio T320" },
487 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
488 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
489 F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
490 &mi1_mdio_ops, "Chelsio T304" },
494 * Return the adapter_info structure with a given index. Out-of-range indices
497 const struct adapter_info *t3_get_adapter_info(unsigned int id)
499 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
502 static struct port_type_info port_types[] = {
504 { t3_ael1002_phy_prep },
505 { t3_vsc8211_phy_prep },
506 { t3_mv88e1xxx_phy_prep },
507 { t3_xaui_direct_phy_prep },
509 { t3_qt2045_phy_prep },
510 { t3_ael1006_phy_prep },
514 #define VPD_ENTRY(name, len) \
515 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
518 * Partial EEPROM Vital Product Data structure. Includes only the ID and
527 VPD_ENTRY(pn, 16); /* part number */
528 VPD_ENTRY(ec, 16); /* EC level */
529 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
530 VPD_ENTRY(na, 12); /* MAC address base */
531 VPD_ENTRY(cclk, 6); /* core clock */
532 VPD_ENTRY(mclk, 6); /* mem clock */
533 VPD_ENTRY(uclk, 6); /* uP clk */
534 VPD_ENTRY(mdc, 6); /* MDIO clk */
535 VPD_ENTRY(mt, 2); /* mem timing */
536 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
537 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
538 VPD_ENTRY(port0, 2); /* PHY0 complex */
539 VPD_ENTRY(port1, 2); /* PHY1 complex */
540 VPD_ENTRY(port2, 2); /* PHY2 complex */
541 VPD_ENTRY(port3, 2); /* PHY3 complex */
542 VPD_ENTRY(rv, 1); /* csum */
543 u32 pad; /* for multiple-of-4 sizing and alignment */
546 #define EEPROM_MAX_POLL 4
547 #define EEPROM_STAT_ADDR 0x4000
548 #define VPD_BASE 0xc00
551 * t3_seeprom_read - read a VPD EEPROM location
552 * @adapter: adapter to read
553 * @addr: EEPROM address
554 * @data: where to store the read data
556 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
557 * VPD ROM capability. A zero is written to the flag bit when the
address is written to the control register. The hardware device will
559 * set the flag to 1 when 4 bytes have been read into the data register.
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Only 4-byte-aligned addresses inside the EEPROM (or the special
	 * status word) are valid. */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
	/* Writing the address with flag bit 0 starts the read; hardware sets
	 * PCI_VPD_ADDR_F once the four data bytes are available. */
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	/* Flag never came up -- the read timed out. */
	if (!(val & PCI_VPD_ADDR_F)) {
	CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data); /* VPD data is little-endian */
586 * t3_seeprom_write - write a VPD EEPROM location
587 * @adapter: adapter to write
588 * @addr: EEPROM address
589 * @data: value to write
591 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
592 * VPD ROM capability.
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Same validity rules as t3_seeprom_read(). */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
	/* Load the data register, then write the address with the flag bit
	 * set; hardware clears PCI_VPD_ADDR_F when the write completes. */
	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
		(u16)addr | PCI_VPD_ADDR_F);
	t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	/* Flag still set -- the write timed out. */
	if (val & PCI_VPD_ADDR_F) {
	CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
620 * t3_seeprom_wp - enable/disable EEPROM write protection
621 * @adapter: the adapter
622 * @enable: 1 to enable write protection, 0 to disable it
624 * Enables or disables write protection on the serial EEPROM.
626 int t3_seeprom_wp(adapter_t *adapter, int enable)
628 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
632 * Convert a character holding a hex digit to a number.
/* Assumes @c is a valid hex digit: '0'-'9', 'a'-'f' or 'A'-'F'. */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
640 * get_vpd_params - read VPD parameters from VPD EEPROM
641 * @adapter: adapter to read
642 * @p: where to store the parameters
644 * Reads card parameters stored in VPD EEPROM.
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it elsewhere: probe VPD_BASE first, and fall back to offset 0 when
	 * the 0x82 ID tag is not found there.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Pull in the whole partial-VPD structure, 32 bits at a time. */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
			(u32 *)((u8 *)&vpd + i));

	/* Clock and timing values are stored as decimal ASCII strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information; synthesize defaults for
	 * rev-0 adapters based on whether the board uses XAUI. */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
	p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
	p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	/* Otherwise decode the per-port PHY complex from the VPD fields. */
	p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
	p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
	p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
	p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
	p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
	p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);

	/* MAC base address: 12 hex characters, two per byte. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
			hex2int(vpd.na_data[2 * i + 1]);
693 /* BIOS boot header */
694 typedef struct boot_header_s {
695 u8 signature[2]; /* signature */
696 u8 length; /* image length (include header) */
697 u8 offset[4]; /* initialization vector */
698 u8 reserved[19]; /* reserved */
699 u8 exheader[2]; /* offset to expansion header */
702 /* serial flash and firmware constants */
704 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
705 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
706 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
708 /* flash command opcodes */
709 SF_PROG_PAGE = 2, /* program page */
710 SF_WR_DISABLE = 4, /* disable writes */
711 SF_RD_STATUS = 5, /* read status register */
712 SF_WR_ENABLE = 6, /* enable writes */
713 SF_RD_DATA_FAST = 0xb, /* read flash */
714 SF_ERASE_SECTOR = 0xd8, /* erase sector */
716 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
717 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
718 FW_MIN_SIZE = 8, /* at least version and csum */
719 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
721 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
722 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
723 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
724 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
725 BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment */
729 * sf1_read - read data from the serial flash
730 * @adapter: the adapter
731 * @byte_cnt: number of bytes to read
732 * @cont: whether another operation will be chained
733 * @valp: where to store the read data
735 * Reads up to 4 bytes of data from the serial flash. The location of
736 * the read needs to be specified prior to calling this by issuing the
737 * appropriate commands to the serial flash.
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
	/* Reject bad sizes and refuse to start while the SF engine is busy. */
	if (!byte_cnt || byte_cnt > 4)
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
	/* Start the read, wait for completion, then latch the data word. */
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	*valp = t3_read_reg(adapter, A_SF_DATA);
756 * sf1_write - write data to the serial flash
757 * @adapter: the adapter
758 * @byte_cnt: number of bytes to write
759 * @cont: whether another operation will be chained
760 * @val: value to write
762 * Writes up to 4 bytes of data to the serial flash. The location of
763 * the write needs to be specified prior to calling this by issuing the
764 * appropriate commands to the serial flash.
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
	/* Reject bad sizes and refuse to start while the SF engine is busy. */
	if (!byte_cnt || byte_cnt > 4)
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
	/* Load the data, start the write (V_OP(1)), and wait until done. */
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
780 * flash_wait_op - wait for a flash operation to complete
781 * @adapter: the adapter
782 * @attempts: max number of polls of the status register
783 * @delay: delay between polls in ms
785 * Wait for a flash operation to complete by polling the status register.
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
	/* Per poll iteration: issue RD_STATUS then read one status byte. */
	if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
	    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
806 * t3_read_flash - read words from serial flash
807 * @adapter: the adapter
808 * @addr: the start address for the read
809 * @nwords: how many 32-bit words to read
810 * @data: where to store the read data
811 * @byte_oriented: whether to store data as bytes or as words
813 * Read the specified number of 32-bit words from the serial flash.
814 * If @byte_oriented is set the read data is stored as a byte array
815 * (i.e., big-endian), otherwise as 32-bit words in the platform's
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		u32 *data, int byte_oriented)
	/* Bounds- and alignment-check the requested window. */
	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
	/* Command word: fast-read opcode in the low byte, byte-swapped
	 * address in the upper bytes. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
	/* Stream out the words, keeping CONT set until the last one. */
	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		/* byte-oriented callers get big-endian byte order */
		*data = htonl(*data);
843 * t3_write_flash - write up to a page of data to the serial flash
844 * @adapter: the adapter
845 * @addr: the start address to write
846 * @n: length of data to write
847 * @data: the data to write
848 * @byte_oriented: whether to store data as bytes or as words
850 * Writes up to a page of data (256 bytes) to the serial flash starting
851 * at the given address.
852 * If @byte_oriented is set the write data is stored as a 32-bit
big-endian array, otherwise in the processor's native endianness.
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
		unsigned int n, const u8 *data,
	unsigned int c, left, val, offset = addr & 0xff;

	/* Writes may neither run off the flash nor cross a 256-byte page. */
	if (addr + n > SF_SIZE || offset + n > 256)
	/* PROG_PAGE opcode plus byte-swapped address, after a write-enable. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
	/* Stream the payload, up to 4 bytes per SF operation. */
	for (left = n; left; left -= c) {
		val = *(const u32*)data;
		ret = sf1_write(adapter, c, c != left, val);
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
	if (memcmp(data - n, (u8 *)buf + offset, n))
899 * t3_get_tp_version - read the tp sram version
900 * @adapter: the adapter
901 * @vers: where to place the version
903 * Reads the protocol sram version from sram.
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
	/* Get version loaded in SRAM: write 0 to FIELD0 to trigger the
	 * embedded operation, wait for it, then read FIELD1. */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
922 * t3_check_tpsram_version - read the tp sram version
923 * @adapter: the adapter
int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
	unsigned int major, minor;

	/* Rev-A silicon is handled specially (branch body elided here). */
	if (adapter->params.rev == T3_REV_A)
	ret = t3_get_tp_version(adapter, &vers);
	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	/* Exact match against the version the driver was built for is OK. */
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
	/* Major mismatch: the SRAM image must be (re)loaded. */
	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
			"driver needs version %d.%d\n", major, minor,
			TP_VERSION_MAJOR, TP_VERSION_MINOR);
	/* Minor mismatch path. */
	CH_ERR(adapter, "found wrong TP version (%u.%u), "
		"driver compiled for version %d.%d\n", major, minor,
		TP_VERSION_MAJOR, TP_VERSION_MINOR);
963 * t3_check_tpsram - check if provided protocol SRAM
964 * is compatible with this driver
965 * @adapter: the adapter
966 * @tp_sram: the firmware image to write
969 * Checks if an adapter's tp sram is compatible with the driver.
970 * Returns 0 if the versions are compatible, a negative error otherwise.
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum: summing every 32-bit word of a valid image must
	 * produce all-ones (1's-complement checksum). */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
990 enum fw_version_type {
996 * t3_get_fw_version - read the firmware version
997 * @adapter: the adapter
998 * @vers: where to place the version
1000 * Reads the FW version from flash.
1002 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1004 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1008 * t3_check_fw_version - check if the FW is compatible with this driver
1009 * @adapter: the adapter
1011 * Checks if an adapter's FW is compatible with the driver. Returns 0
1012 * if the versions are compatible, a negative error otherwise.
int t3_check_fw_version(adapter_t *adapter, int *must_load)
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);

	/* Decode type/major/minor from the flash version word. */
	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	/* Exact match with the driver's expected version: success. */
	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
			"driver needs version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if ((int)minor < FW_VERSION_MINOR) {
		/* Older minor version: warn only. */
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	/* Newer-than-expected minor version: also a warning. */
	CH_WARN(adapter, "found newer FW version(%u.%u), "
		"driver compiled for version %u.%u\n", major, minor,
		FW_VERSION_MAJOR, FW_VERSION_MINOR);
1052 * t3_flash_erase_sectors - erase a range of flash sectors
1053 * @adapter: the adapter
1054 * @start: the first sector to erase
1055 * @end: the last sector to erase
1057 * Erases the sectors in the given range.
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
	while (start <= end) {
		/* Per sector: write-enable, issue ERASE_SECTOR with the
		 * sector number in bits 8+, then wait for completion. */
		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
			SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1075 * t3_load_fw - download firmware
1076 * @adapter: the adapter
1077 * @fw_data: the firmware image to write
1080 * Write the supplied firmware image to the card's serial flash.
1081 * The FW image has the following sections: @size - 8 bytes of code and
1082 * data, followed by 4 bytes of FW version, followed by the 32-bit
1083 * 1's complement checksum of the whole image.
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be whole 32-bit words and at least version + checksum. */
	if ((size & 3) || size < FW_MIN_SIZE)
	if (size - 8 > FW_MAX_SIZE)

	/* 1's-complement checksum over the whole image must be all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",

	/* Erase the FW sector, then program the body in 256-byte chunks. */
	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);

	size -= 8; /* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);

		fw_data += chunk_size;

	/* Finish by writing the version word at its fixed flash address. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
	CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1130 * t3_load_boot - download boot flash
1131 * @adapter: the adapter
1132 * @boot_data: the boot image to write
1135 * Write the supplied boot image to the card's serial flash.
1136 * The boot image has the following sections: a 28-byte header and the
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
	boot_header_t *header = (boot_header_t *)boot_data;
	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adapter, "boot image too small/large\n");
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adapter, "boot image missing signature\n");
	/* Header length is expressed in 512-byte increments. */
	if (header->length * BOOT_SIZE_INC != size) {
		CH_ERR(adapter, "boot image header length != image length\n");

	/* Erase the target sectors, then program in 256-byte chunks. */
	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);

	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);

		boot_data += chunk_size;

	CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1187 #define CIM_CTL_BASE 0x2000
1190 * t3_cim_ctl_blk_read - read a block from CIM control region
1191 * @adap: the adapter
1192 * @addr: the start address within the CIM control region
1193 * @n: number of words to read
1194 * @valp: where to store the result
1196 * Reads a block of 4-byte words from the CIM control region.
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
	/* Refuse to start while the host-access interface is busy. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
	/* One word per iteration: set the address, wait for HOSTBUSY to
	 * clear, then collect the data word. */
	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
		*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1217 * t3_link_changed - handle interface link changes
1218 * @adapter: the adapter
1219 * @port_id: the port index that changed link state
1221 * Called when a port's link settings change to propagate the new values
1222 * to the associated PHY and MAC. After performing the common tasks it
1223 * invokes an OS-specific handler.
void t3_link_changed(adapter_t *adapter, int port_id)
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* On rev > 0 XAUI boards, gate the XGMAC TX/RX enables on link state
	 * when the link state has changed. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			link_ok ? F_TXACTENABLE | F_RXEN : 0);
	/* Record the new state, mapping negative values to "invalid". */
	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	/* Auto-negotiated pause is honored only where we also requested it. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = (unsigned char)fc;

	/* Hand off to the OS-specific notification hook. */
	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1260 * t3_link_start - apply link configuration to MAC/PHY
1261 * @phy: the PHY to setup
1262 * @mac: the MAC to setup
1263 * @lc: the requested link configuration
1265 * Set up a port's MAC and PHY according to a desired link configuration.
1266 * - If the PHY can auto-negotiate first decide what to advertise, then
1267 * enable/disable auto-negotiation as desired, and reset.
1268 * - If the PHY does not auto-negotiate just reset it.
1269 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1270 * otherwise do it later based on the outcome of auto-negotiation.
1272 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1274 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	/*
	 * Autoneg-capable PHY: rebuild the pause advertisement bits from the
	 * requested flow-control settings before programming the PHY.
	 */
1277 	if (lc->supported & SUPPORTED_Autoneg) {
1278 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1280 			lc->advertising |= ADVERTISED_Asym_Pause;
1282 			lc->advertising |= ADVERTISED_Pause;
1284 		phy->ops->advertise(phy, lc->advertising);
1286 		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Forced mode: program MAC and PHY to the requested settings. */
1287 			lc->speed = lc->requested_speed;
1288 			lc->duplex = lc->requested_duplex;
1289 			lc->fc = (unsigned char)fc;
1290 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1292 			/* Also disables autoneg */
1293 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1295 			phy->ops->autoneg_enable(phy);
	/* Non-autoneg PHY: set MAC FC only (speed/duplex -1 = leave as-is) and reset. */
1297 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1298 		lc->fc = (unsigned char)fc;
1299 		phy->ops->reset(phy, 0);
1305 * t3_set_vlan_accel - control HW VLAN extraction
1306 * @adapter: the adapter
1307 * @ports: bitmap of adapter ports to operate on
1308 * @on: enable (1) or disable (0) HW VLAN extraction
1310 * Enables or disables HW extraction of VLAN tags for the given port.
1312 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
	/* Single read-modify-write of the per-port VLANEXTRACTIONENABLE bits. */
1314 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1315 			 ports << S_VLANEXTRACTIONENABLE,
1316 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1320 unsigned int mask; /* bits to check in interrupt status */
1321 const char *msg; /* message to print or NULL */
1322 short stat_idx; /* stat counter to increment or -1 */
1323 unsigned short fatal:1; /* whether the condition reported is fatal */
1327 * t3_handle_intr_status - table driven interrupt handler
1328 * @adapter: the adapter that generated the interrupt
1329 * @reg: the interrupt status register to process
1330 * @mask: a mask to apply to the interrupt status
1331 * @acts: table of interrupt actions
1332 * @stats: statistics counters tracking interrupt occurrences
1334 * A table driven interrupt handler that applies a set of masks to an
1335 * interrupt status word and performs the corresponding actions if the
1336 * interrupts described by the mask have occurred. The actions include
1337 * optionally printing a warning or alert message, and optionally
1338 * incrementing a stat counter. The table is terminated by an entry
1339 * specifying mask 0. Returns the number of fatal interrupt conditions.
1341 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1343 				 const struct intr_info *acts,
1344 				 unsigned long *stats)
	/* Raw cause bits, restricted to those the caller cares about. */
1347 	unsigned int status = t3_read_reg(adapter, reg) & mask;
	/* Walk the action table; a zero mask entry terminates it. */
1349 	for ( ; acts->mask; ++acts) {
1350 		if (!(status & acts->mask)) continue;
1353 			CH_ALERT(adapter, "%s (0x%x)\n",
1354 				 acts->msg, status & acts->mask);
1355 		} else if (acts->msg)
1356 			CH_WARN(adapter, "%s (0x%x)\n",
1357 				 acts->msg, status & acts->mask);
		/* stat_idx < 0 means "no counter for this condition". */
1358 		if (acts->stat_idx >= 0)
1359 			stats[acts->stat_idx]++;
1361 	if (status) /* clear processed interrupts */
1362 		t3_write_reg(adapter, reg, status);
1366 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1367 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1368 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1369 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1370 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1371 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1373 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1374 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1376 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1377 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1378 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1379 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1380 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1381 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1382 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1383 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1384 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1385 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1386 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1387 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1388 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1389 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1390 F_TXPARERR | V_BISTERR(M_BISTERR))
1391 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1392 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1393 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1394 #define ULPTX_INTR_MASK 0xfc
1395 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1396 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1397 F_ZERO_SWITCH_ERROR)
1398 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1399 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1400 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1401 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1402 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1403 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1404 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1405 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1406 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1407 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1408 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1409 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1410 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1411 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1412 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1413 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1414 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1415 V_MCAPARERRENB(M_MCAPARERRENB))
1416 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1417 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1418 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1419 F_MPS0 | F_CPL_SWITCH)
1422 * Interrupt handler for the PCIX1 module.
1424 static void pci_intr_handler(adapter_t *adapter)
	/* Cause-bit -> message/stat table; nearly all PCIX errors are fatal. */
1426 	static struct intr_info pcix1_intr_info[] = {
1427 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1428 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1429 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1430 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1431 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1432 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1433 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1434 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1435 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1437 		{ F_DETCORECCERR, "PCI correctable ECC error",
1438 		  STAT_PCI_CORR_ECC, 0 },
1439 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1440 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1441 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1443 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1445 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1447 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
	/* Any fatal condition brings the whole adapter down. */
1452 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1453 				  pcix1_intr_info, adapter->irq_stats))
1454 		t3_fatal_err(adapter);
1458 * Interrupt handler for the PCIE module.
1460 static void pcie_intr_handler(adapter_t *adapter)
	/* Cause-bit -> message table for the PCIe core; all entries fatal. */
1462 	static struct intr_info pcie_intr_info[] = {
1463 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1465 		  "PCI unexpected split completion DMA read error", -1, 1 },
1467 		  "PCI unexpected split completion DMA command error", -1, 1 },
1468 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1469 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1470 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1471 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1472 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1473 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1474 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1475 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1476 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1477 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1478 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
	/* PEX errors carry an extra code register worth logging up front. */
1482 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1483 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1484 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1486 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1487 				  pcie_intr_info, adapter->irq_stats))
1488 		t3_fatal_err(adapter);
1492 * TP interrupt handler.
1494 static void tp_intr_handler(adapter_t *adapter)
	/* Pre-T3C parts use raw bit positions for the TP cause register. */
1496 	static struct intr_info tp_intr_info[] = {
1497 		{ 0xffffff, "TP parity error", -1, 1 },
1498 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1499 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
	/* T3C moved the cause bits, hence a second table. */
1502 	static struct intr_info tp_intr_info_t3c[] = {
1503 		{ 0x1fffffff, "TP parity error", -1, 1 },
1504 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1505 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
	/* Select the table by chip revision; all conditions are fatal. */
1509 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1510 				  adapter->params.rev < T3_REV_C ?
1511 				  tp_intr_info : tp_intr_info_t3c, NULL))
1512 		t3_fatal_err(adapter);
1516 * CIM interrupt handler.
1518 static void cim_intr_handler(adapter_t *adapter)
	/* All CIM error conditions are treated as fatal (no stat counters). */
1520 	static struct intr_info cim_intr_info[] = {
1521 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1522 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1523 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1524 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1525 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1526 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1527 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1528 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1529 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1530 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1531 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1532 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1533 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1534 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1535 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1536 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1537 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1538 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1539 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1540 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1541 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1542 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1543 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1544 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1548 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1549 				  cim_intr_info, NULL))
1550 		t3_fatal_err(adapter);
1554 * ULP RX interrupt handler.
1556 static void ulprx_intr_handler(adapter_t *adapter)
	/* ULP RX parity/framing conditions; every entry is fatal. */
1558 	static struct intr_info ulprx_intr_info[] = {
1559 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1560 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1561 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1562 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1563 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1564 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1565 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1566 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1570 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1571 				  ulprx_intr_info, NULL))
1572 		t3_fatal_err(adapter);
1576 * ULP TX interrupt handler.
1578 static void ulptx_intr_handler(adapter_t *adapter)
	/* PBL out-of-bounds is counted (non-fatal); parity errors are fatal. */
1580 	static struct intr_info ulptx_intr_info[] = {
1581 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1582 		  STAT_ULP_CH0_PBL_OOB, 0 },
1583 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1584 		  STAT_ULP_CH1_PBL_OOB, 0 },
1585 		{ 0xfc, "ULP TX parity error", -1, 1 },
1589 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1590 				  ulptx_intr_info, adapter->irq_stats))
1591 		t3_fatal_err(adapter);
1594 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1595 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1596 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1597 F_ICSPI1_TX_FRAMING_ERROR)
1598 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1599 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1600 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1601 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1604 * PM TX interrupt handler.
1606 static void pmtx_intr_handler(adapter_t *adapter)
	/* PM TX framing/parity conditions; all fatal. */
1608 	static struct intr_info pmtx_intr_info[] = {
1609 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1610 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1611 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1612 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1613 		  "PMTX ispi parity error", -1, 1 },
1614 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1615 		  "PMTX ospi parity error", -1, 1 },
1619 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1620 				  pmtx_intr_info, NULL))
1621 		t3_fatal_err(adapter);
1624 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1625 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1626 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1627 F_IESPI1_TX_FRAMING_ERROR)
1628 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1629 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1630 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1631 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1634 * PM RX interrupt handler.
1636 static void pmrx_intr_handler(adapter_t *adapter)
	/* PM RX framing/parity conditions; mirrors pmtx_intr_handler. */
1638 	static struct intr_info pmrx_intr_info[] = {
1639 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1640 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1641 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1642 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1643 		  "PMRX ispi parity error", -1, 1 },
1644 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1645 		  "PMRX ospi parity error", -1, 1 },
1649 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1650 				  pmrx_intr_info, NULL))
1651 		t3_fatal_err(adapter);
1655 * CPL switch interrupt handler.
1657 static void cplsw_intr_handler(adapter_t *adapter)
	/* CPL switch errors; all fatal. */
1659 	static struct intr_info cplsw_intr_info[] = {
1660 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1661 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1662 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1663 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1664 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1665 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1669 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1670 				  cplsw_intr_info, NULL))
1671 		t3_fatal_err(adapter);
1675 * MPS interrupt handler.
1677 static void mps_intr_handler(adapter_t *adapter)
	/* Single catch-all entry: any MPS parity bit is fatal. */
1679 	static struct intr_info mps_intr_info[] = {
1680 		{ 0x1ff, "MPS parity error", -1, 1 },
1684 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1685 				  mps_intr_info, NULL))
1686 		t3_fatal_err(adapter);
1689 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1692 * MC7 interrupt handler.
1694 static void mc7_intr_handler(struct mc7 *mc7)
1696 	adapter_t *adapter = mc7->adapter;
1697 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
	/* Correctable ECC: count it and log address + data, keep running. */
1700 		mc7->stats.corr_err++;
1701 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1702 			"data 0x%x 0x%x 0x%x\n", mc7->name,
1703 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1704 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1705 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1706 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	/* Uncorrectable ECC: alert with the UE address/data registers. */
1710 		mc7->stats.uncorr_err++;
1711 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1712 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1713 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1714 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1715 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1716 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	/* Parity error: the PE field of the cause carries the detail. */
1720 		mc7->stats.parity_err++;
1721 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1722 			 mc7->name, G_PE(cause));
	/* Address error: the error-address register only exists on rev > 0. */
1728 		if (adapter->params.rev > 0)
1729 			addr = t3_read_reg(adapter,
1730 					   mc7->offset + A_MC7_ERR_ADDR);
1731 		mc7->stats.addr_err++;
1732 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
	/* AE/UE/PE conditions (MC7_INTR_FATAL) bring the adapter down. */
1736 	if (cause & MC7_INTR_FATAL)
1737 		t3_fatal_err(adapter);
	/* Acknowledge everything we saw. */
1739 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1742 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1743 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1745 * XGMAC interrupt handler.
1747 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1752 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1753 	mac = &adap2pinfo(adap, idx)->mac;
1754 	cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
	/* FIFO parity errors are logged loudly; the rest are just counted. */
1756 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1757 		mac->stats.tx_fifo_parity_err++;
1758 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1760 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1761 		mac->stats.rx_fifo_parity_err++;
1762 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1764 	if (cause & F_TXFIFO_UNDERRUN)
1765 		mac->stats.tx_fifo_urun++;
1766 	if (cause & F_RXFIFO_OVERFLOW)
1767 		mac->stats.rx_fifo_ovfl++;
1768 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1769 		mac->stats.serdes_signal_loss++;
1770 	if (cause & F_XAUIPCSCTCERR)
1771 		mac->stats.xaui_pcs_ctc_err++;
1772 	if (cause & F_XAUIPCSALIGNCHANGE)
1773 		mac->stats.xaui_pcs_align_change++;
	/* Ack the cause bits, then report fatal conditions to the caller. */
1775 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1776 	if (cause & XGM_INTR_FATAL)
1782 * Interrupt handler for PHY events.
1784 int t3_phy_intr_handler(adapter_t *adapter)
1786 	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1787 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1789 	for_each_port(adapter, i) {
1790 		struct port_info *p = adap2pinfo(adapter, i);
		/* Isolate the lowest set GPIO bit: one interrupt GPIO per port. */
1792 		mask = gpi - (gpi & (gpi - 1));
		/* Skip PHYs that don't report interrupts. */
1795 		if (!(p->phy.caps & SUPPORTED_IRQ))
1799 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
			/* Dispatch PHY events: link change and FIFO error. */
1801 			if (phy_cause & cphy_cause_link_change)
1802 				t3_link_changed(adapter, i);
1803 			if (phy_cause & cphy_cause_fifo_error)
1804 				p->phy.fifo_errors++;
	/* Acknowledge the GPIO cause bits we just handled. */
1808 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1813 * t3_slow_intr_handler - control path interrupt handler
1814 * @adapter: the adapter
1816 * T3 interrupt handler for non-data interrupt events, e.g., errors.
1817 * The designation 'slow' is because it involves register reads, while
1818 * data interrupts typically don't involve any MMIOs.
1820 int t3_slow_intr_handler(adapter_t *adapter)
1822 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
	/* Only service causes we actually enabled. */
1824 	cause &= adapter->slow_intr_mask;
	/* Dispatch each cause bit to its module-specific handler. */
1827 	if (cause & F_PCIM0) {
1828 		if (is_pcie(adapter))
1829 			pcie_intr_handler(adapter);
1831 			pci_intr_handler(adapter);
1834 		t3_sge_err_intr_handler(adapter);
1835 	if (cause & F_MC7_PMRX)
1836 		mc7_intr_handler(&adapter->pmrx);
1837 	if (cause & F_MC7_PMTX)
1838 		mc7_intr_handler(&adapter->pmtx);
1839 	if (cause & F_MC7_CM)
1840 		mc7_intr_handler(&adapter->cm);
1842 		cim_intr_handler(adapter);
1844 		tp_intr_handler(adapter);
1845 	if (cause & F_ULP2_RX)
1846 		ulprx_intr_handler(adapter);
1847 	if (cause & F_ULP2_TX)
1848 		ulptx_intr_handler(adapter);
1849 	if (cause & F_PM1_RX)
1850 		pmrx_intr_handler(adapter);
1851 	if (cause & F_PM1_TX)
1852 		pmtx_intr_handler(adapter);
1853 	if (cause & F_CPL_SWITCH)
1854 		cplsw_intr_handler(adapter);
1856 		mps_intr_handler(adapter)
1858 		t3_mc5_intr_handler(&adapter->mc5);
1859 	if (cause & F_XGMAC0_0)
1860 		mac_intr_handler(adapter, 0);
1861 	if (cause & F_XGMAC0_1)
1862 		mac_intr_handler(adapter, 1);
	/* T3DBG carries PHY/GPIO events; forwarded to the OS layer. */
1863 	if (cause & F_T3DBG)
1864 		t3_os_ext_intr_handler(adapter);
1866 	/* Clear the interrupts just processed. */
1867 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1868 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1873 * t3_intr_enable - enable interrupts
1874 * @adapter: the adapter whose interrupts should be enabled
1876 * Enable interrupts by setting the interrupt enable registers of the
1877 * various HW modules and then enabling the top-level interrupt
1880 void t3_intr_enable(adapter_t *adapter)
	/* Bulk-programmed enable registers; written via t3_write_regs below. */
1882 	static struct addr_val_pair intr_en_avp[] = {
1883 		{ A_SG_INT_ENABLE, SGE_INTR_MASK },
1884 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
		/* PMTX and CM MC7 instances are reached by rebasing the PMRX address. */
1885 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1887 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1889 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1890 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1891 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1892 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1893 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1894 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
1897 	adapter->slow_intr_mask = PL_INTR_MASK;
1899 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between pre-T3C and T3C silicon. */
1900 	t3_write_reg(adapter, A_TP_INT_ENABLE,
1901 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
	/* Rev > 0 parts gain extra CPL/ULPTX conditions worth enabling. */
1903 	if (adapter->params.rev > 0) {
1904 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1905 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1906 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1907 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1908 			     F_PBL_BOUND_ERR_CH1);
1910 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1911 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	/* GPIO interrupt polarity and enable come from the board info. */
1914 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1915 		     adapter_info(adapter)->gpio_intr);
1916 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1917 		     adapter_info(adapter)->gpio_intr);
1918 	if (is_pcie(adapter))
1919 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1921 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Finally open the top-level concentrator and flush the write. */
1922 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1923 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1927 * t3_intr_disable - disable a card's interrupts
1928 * @adapter: the adapter whose interrupts should be disabled
1930 * Disable interrupts. We only disable the top-level interrupt
1931 * concentrator and the SGE data interrupts.
1933 void t3_intr_disable(adapter_t *adapter)
	/* Close only the top-level concentrator; module enables stay set. */
1935 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1936 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1937 	adapter->slow_intr_mask = 0;
1941 * t3_intr_clear - clear all interrupts
1942 * @adapter: the adapter whose interrupts should be cleared
1944 * Clears all interrupts.
1946 void t3_intr_clear(adapter_t *adapter)
	/* Every cause register to blanket-clear with an all-ones write. */
1948 	static const unsigned int cause_reg_addr[] = {
1950 		A_SG_RSPQ_FL_STATUS,
		/* PMTX/CM MC7 cause registers, rebased from the PMRX address. */
1953 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1954 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1955 		A_CIM_HOST_INT_CAUSE,
1968 	/* Clear PHY and MAC interrupts for each port. */
1969 	for_each_port(adapter, i)
1970 		t3_port_intr_clear(adapter, i);
1972 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1973 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
	/* PCIe keeps a separate PEX error register that must be cleared too. */
1975 	if (is_pcie(adapter))
1976 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1977 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1978 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1982 * t3_port_intr_enable - enable port-specific interrupts
1983 * @adapter: associated adapter
1984 * @idx: index of port whose interrupts should be enabled
1986 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1989 void t3_port_intr_enable(adapter_t *adapter, int idx)
1991 	struct port_info *pi = adap2pinfo(adapter, idx);
	/* Enable the port's MAC interrupt mask, then its PHY via the ops hook. */
1993 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
1994 	pi->phy.ops->intr_enable(&pi->phy);
1998 * t3_port_intr_disable - disable port-specific interrupts
1999 * @adapter: associated adapter
2000 * @idx: index of port whose interrupts should be disabled
2002 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2005 void t3_port_intr_disable(adapter_t *adapter, int idx)
2007 	struct port_info *pi = adap2pinfo(adapter, idx);
	/* Zero the MAC interrupt mask, then mask the PHY via the ops hook. */
2009 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2010 	pi->phy.ops->intr_disable(&pi->phy);
2014 * t3_port_intr_clear - clear port-specific interrupts
2015 * @adapter: associated adapter
2016 * @idx: index of port whose interrupts to clear
2018 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2021 void t3_port_intr_clear(adapter_t *adapter, int idx)
2023 	struct port_info *pi = adap2pinfo(adapter, idx);
	/* All-ones write clears every pending MAC cause; PHY via ops hook. */
2025 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2026 	pi->phy.ops->intr_clear(&pi->phy);
2029 #define SG_CONTEXT_CMD_ATTEMPTS 100
2032 * t3_sge_write_context - write an SGE context
2033 * @adapter: the adapter
2034 * @id: the context id
2035 * @type: the context type
2037 * Program an SGE context with the values already loaded in the
2038 * CONTEXT_DATA? registers.
2040 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
	/* All-ones masks: commit every bit of the staged CONTEXT_DATA values. */
2043 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2044 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2045 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2046 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	/* Opcode 1 = write; the type flag selects the context class. */
2047 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2048 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2049 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2050 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2053 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
	/* Stage all-zero data words, then commit them to zero the context. */
2055 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2056 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2057 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2058 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2059 	return t3_sge_write_context(adap, id, type);
2063 * t3_sge_init_ecntxt - initialize an SGE egress context
2064 * @adapter: the adapter to configure
2065 * @id: the context id
2066 * @gts_enable: whether to enable GTS for the context
2067 * @type: the egress context type
2068 * @respq: associated response queue
2069 * @base_addr: base address of queue
2070 * @size: number of queue entries
2072 * @gen: initial generation value for the context
2073 * @cidx: consumer pointer
2075 * Initialize an SGE egress context and make it ready for use. If the
2076 * platform allows concurrent context operations, the caller is
2077 * responsible for appropriate locking.
2079 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2080 		       enum sge_context_type type, int respq, u64 base_addr,
2081 		       unsigned int size, unsigned int token, int gen,
	/* Offload queues start with zero credits; others get the FW WR quota. */
2084 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2086 	if (base_addr & 0xfff)     /* must be 4K aligned */
2088 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	/* Stage the four context data words, then commit via write_context. */
2092 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2093 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2094 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2095 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
2097 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2099 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2100 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2101 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2103 	return t3_sge_write_context(adapter, id, F_EGRESS);
2107 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2108 * @adapter: the adapter to configure
2109 * @id: the context id
2110 * @gts_enable: whether to enable GTS for the context
2111 * @base_addr: base address of queue
2112 * @size: number of queue entries
2113 * @bsize: size of each buffer for this queue
2114 * @cong_thres: threshold to signal congestion to upstream producers
2115 * @gen: initial generation value for the context
2116 * @cidx: consumer pointer
2118 * Initialize an SGE free list context and make it ready for use. The
2119 * caller is responsible for ensuring only one context operation occurs
2122 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2123 			u64 base_addr, unsigned int size, unsigned int bsize,
2124 			unsigned int cong_thres, int gen, unsigned int cidx)
2126 	if (base_addr & 0xfff)     /* must be 4K aligned */
2128 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	/* Stage the free-list context words; fields straddle word boundaries. */
2132 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2134 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2135 		     V_FL_BASE_HI((u32)base_addr) |
2136 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2137 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2138 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2139 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2140 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2141 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2142 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2143 	return t3_sge_write_context(adapter, id, F_FREELIST);
2147 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2148 * @adapter: the adapter to configure
2149 * @id: the context id
2150 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2151 * @base_addr: base address of queue
2152 * @size: number of queue entries
2153 * @fl_thres: threshold for selecting the normal or jumbo free list
2154 * @gen: initial generation value for the context
2155 * @cidx: consumer pointer
2157 * Initialize an SGE response queue context and make it ready for use.
2158 * The caller is responsible for ensuring only one context operation
2161 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2162 			 u64 base_addr, unsigned int size,
2163 			 unsigned int fl_thres, int gen, unsigned int cidx)
2165 	unsigned int intr = 0;
2167 	if (base_addr & 0xfff)     /* must be 4K aligned */
2169 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2173 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2175 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	/* irq_vec_idx < 0 means "no IRQ": leave the interrupt field zero. */
2177 	if (irq_vec_idx >= 0)
2178 		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2179 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2180 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2181 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2182 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2186 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2187 * @adapter: the adapter to configure
2188 * @id: the context id
2189 * @base_addr: base address of queue
2190 * @size: number of queue entries
2191 * @rspq: response queue for async notifications
2192 * @ovfl_mode: CQ overflow mode
2193 * @credits: completion queue credits
2194 * @credit_thres: the credit threshold
2196 * Initialize an SGE completion queue context and make it ready for use.
2197 * The caller is responsible for ensuring only one context operation
2200 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2201 			unsigned int size, int rspq, int ovfl_mode,
2202 			unsigned int credits, unsigned int credit_thres)
2204 	if (base_addr & 0xfff)     /* must be 4K aligned */
2206 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2210 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2211 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	/* Note: CQ_ERR is initialized from ovfl_mode as well. */
2213 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2214 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2215 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2216 		     V_CQ_ERR(ovfl_mode));
2217 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2218 		     V_CQ_CREDIT_THRES(credit_thres));
2219 	return t3_sge_write_context(adapter, id, F_CQ);
2223 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2224 * @adapter: the adapter
2225 * @id: the egress context id
2226 * @enable: enable (1) or disable (0) the context
2228 * Enable or disable an SGE egress context. The caller is responsible for
2229 * ensuring only one context operation occurs at a time.
2231 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2233 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2236 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2237 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2238 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2239 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2240 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2241 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2242 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2243 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2244 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2248 * t3_sge_disable_fl - disable an SGE free-buffer list
2249 * @adapter: the adapter
2250 * @id: the free list context id
2252 * Disable an SGE free-buffer list. The caller is responsible for
2253 * ensuring only one context operation occurs at a time.
2255 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2257 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2260 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2261 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2262 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2263 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2264 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2265 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2266 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2267 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2268 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2272 * t3_sge_disable_rspcntxt - disable an SGE response queue
2273 * @adapter: the adapter
2274 * @id: the response queue context id
2276 * Disable an SGE response queue. The caller is responsible for
2277 * ensuring only one context operation occurs at a time.
2279 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2281 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2284 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2285 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2286 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2287 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2288 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2289 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2290 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2291 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2292 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2296 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2297 * @adapter: the adapter
2298 * @id: the completion queue context id
2300 * Disable an SGE completion queue. The caller is responsible for
2301 * ensuring only one context operation occurs at a time.
2303 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2305 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2308 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2309 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2310 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2311 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2312 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2313 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2314 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2315 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2316 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2320 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2321 * @adapter: the adapter
2322 * @id: the context id
2323 * @op: the operation to perform
2324 * @credits: credits to return to the CQ
2326 * Perform the selected operation on an SGE completion queue context.
2327 * The caller is responsible for ensuring only one context operation
2330 * For most operations the function returns the current HW position in
2331 * the completion queue.
2333 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2334 unsigned int credits)
2338 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2341 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2342 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2343 V_CONTEXT(id) | F_CQ);
2344 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2345 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2348 if (op >= 2 && op < 7) {
2349 if (adapter->params.rev > 0)
2350 return G_CQ_INDEX(val);
2352 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2353 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2354 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2355 F_CONTEXT_CMD_BUSY, 0,
2356 SG_CONTEXT_CMD_ATTEMPTS, 1))
2358 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2364 * t3_sge_read_context - read an SGE context
2365 * @type: the context type
2366 * @adapter: the adapter
2367 * @id: the context id
2368 * @data: holds the retrieved context
2370 * Read an SGE egress context. The caller is responsible for ensuring
2371 * only one context operation occurs at a time.
2373 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2374 unsigned int id, u32 data[4])
2376 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2379 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2380 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2381 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2382 SG_CONTEXT_CMD_ATTEMPTS, 1))
2384 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2385 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2386 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2387 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2392 * t3_sge_read_ecntxt - read an SGE egress context
2393 * @adapter: the adapter
2394 * @id: the context id
2395 * @data: holds the retrieved context
2397 * Read an SGE egress context. The caller is responsible for ensuring
2398 * only one context operation occurs at a time.
2400 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2404 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2408 * t3_sge_read_cq - read an SGE CQ context
2409 * @adapter: the adapter
2410 * @id: the context id
2411 * @data: holds the retrieved context
2413 * Read an SGE CQ context. The caller is responsible for ensuring
2414 * only one context operation occurs at a time.
2416 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2420 return t3_sge_read_context(F_CQ, adapter, id, data);
2424 * t3_sge_read_fl - read an SGE free-list context
2425 * @adapter: the adapter
2426 * @id: the context id
2427 * @data: holds the retrieved context
2429 * Read an SGE free-list context. The caller is responsible for ensuring
2430 * only one context operation occurs at a time.
2432 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2434 if (id >= SGE_QSETS * 2)
2436 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2440 * t3_sge_read_rspq - read an SGE response queue context
2441 * @adapter: the adapter
2442 * @id: the context id
2443 * @data: holds the retrieved context
2445 * Read an SGE response queue context. The caller is responsible for
2446 * ensuring only one context operation occurs at a time.
2448 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2450 if (id >= SGE_QSETS)
2452 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2456 * t3_config_rss - configure Rx packet steering
2457 * @adapter: the adapter
2458 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2459 * @cpus: values for the CPU lookup table (0xff terminated)
2460 * @rspq: values for the response queue lookup table (0xffff terminated)
2462 * Programs the receive packet steering logic. @cpus and @rspq provide
2463 * the values for the CPU and response queue lookup tables. If they
2464 * provide fewer values than the size of the tables the supplied values
2465 * are used repeatedly until the tables are fully populated.
2467 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2470 int i, j, cpu_idx = 0, q_idx = 0;
2473 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2476 for (j = 0; j < 2; ++j) {
2477 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2478 if (cpus[cpu_idx] == 0xff)
2481 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2485 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2486 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2487 (i << 16) | rspq[q_idx++]);
2488 if (rspq[q_idx] == 0xffff)
2492 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2496 * t3_read_rss - read the contents of the RSS tables
2497 * @adapter: the adapter
2498 * @lkup: holds the contents of the RSS lookup table
2499 * @map: holds the contents of the RSS map table
2501 * Reads the contents of the receive packet steering tables.
2503 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2509 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2510 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2512 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2513 if (!(val & 0x80000000))
2516 *lkup++ = (u8)(val >> 8);
2520 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2521 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2523 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2524 if (!(val & 0x80000000))
2532 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2533 * @adap: the adapter
2534 * @enable: 1 to select offload mode, 0 for regular NIC
2536 * Switches TP to NIC/offload mode.
2538 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2540 if (is_offload(adap) || !enable)
2541 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2542 V_NICMODE(!enable));
2546 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2547 * @adap: the adapter
2548 * @addr: the indirect TP register address
2549 * @mask: specifies the field within the register to modify
2550 * @val: new value for the field
2552 * Sets a field of an indirect TP register to the given value.
2554 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2555 unsigned int mask, unsigned int val)
2557 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2558 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2559 t3_write_reg(adap, A_TP_PIO_DATA, val);
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;	/* round down to a multiple of 24 */
}
/* Program a memory-region base register and advance the running cursor. */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
2584 * partition_mem - partition memory and configure TP memory settings
2585 * @adap: the adapter
2586 * @p: the TP parameters
2588 * Partitions context and payload memory and configures TP's memory
2591 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2593 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2594 unsigned int timers = 0, timers_shift = 22;
2596 if (adap->params.rev > 0) {
2597 if (tids <= 16 * 1024) {
2600 } else if (tids <= 64 * 1024) {
2603 } else if (tids <= 256 * 1024) {
2609 t3_write_reg(adap, A_TP_PMM_SIZE,
2610 p->chan_rx_size | (p->chan_tx_size >> 16));
2612 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2613 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2614 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2615 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2616 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2618 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2619 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2620 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2622 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2623 /* Add a bit of headroom and make multiple of 24 */
2625 pstructs -= pstructs % 24;
2626 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2628 m = tids * TCB_SIZE;
2629 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2630 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2631 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2632 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2633 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2634 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2635 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2636 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2638 m = (m + 4095) & ~0xfff;
2639 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2640 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2642 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2643 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2644 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2646 adap->params.mc5.nservers += m - tids;
2649 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2651 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2652 t3_write_reg(adap, A_TP_PIO_DATA, val);
2655 static void tp_config(adapter_t *adap, const struct tp_params *p)
2657 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2658 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2659 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2660 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2661 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2662 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2663 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2664 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2665 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2666 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2667 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2668 F_IPV6ENABLE | F_NICMODE);
2669 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2670 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2671 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2672 adap->params.rev > 0 ? F_ENABLEESND :
2674 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2676 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2677 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2678 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2679 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2680 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2681 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2682 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2684 if (adap->params.rev > 0) {
2685 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2686 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
2687 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
2688 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2689 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
2690 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
2691 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
2693 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2695 if (adap->params.rev == T3_REV_C)
2696 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2697 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2698 V_TABLELATENCYDELTA(4));
2700 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2701 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2702 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2703 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2705 if (adap->params.nports > 2) {
2706 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2707 F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
2708 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2709 V_RXMAPMODE(M_RXMAPMODE), 0);
2710 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2711 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2712 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2713 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2714 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2715 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2716 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
2720 /* TCP timer values in ms */
2721 #define TP_DACK_TIMER 50
2722 #define TP_RTO_MIN 250
2725 * tp_set_timers - set TP timing parameters
2726 * @adap: the adapter to set
2727 * @core_clk: the core clock frequency in Hz
2729 * Set TP's timing parameters, such as the various timer resolutions and
2730 * the TCP timer values.
2732 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2734 unsigned int tre = adap->params.tp.tre;
2735 unsigned int dack_re = adap->params.tp.dack_re;
2736 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2737 unsigned int tps = core_clk >> tre;
2739 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2740 V_DELAYEDACKRESOLUTION(dack_re) |
2741 V_TIMESTAMPRESOLUTION(tstamp_re));
2742 t3_write_reg(adap, A_TP_DACK_TIMER,
2743 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2744 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2745 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2746 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2747 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2748 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2749 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2750 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2753 #define SECONDS * tps
2755 t3_write_reg(adap, A_TP_MSL,
2756 adap->params.rev > 0 ? 0 : 2 SECONDS);
2757 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2758 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2759 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2760 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2761 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2762 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2763 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2764 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2769 #ifdef CONFIG_CHELSIO_T3_CORE
2771 * t3_tp_set_coalescing_size - set receive coalescing size
2772 * @adap: the adapter
2773 * @size: the receive coalescing size
2774 * @psh: whether a set PSH bit should deliver coalesced data
2776 * Set the receive coalescing size and PSH bit handling.
2778 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2782 if (size > MAX_RX_COALESCING_LEN)
2785 val = t3_read_reg(adap, A_TP_PARA_REG3);
2786 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2789 val |= F_RXCOALESCEENABLE;
2791 val |= F_RXCOALESCEPSHEN;
2792 size = min(MAX_RX_COALESCING_LEN, size);
2793 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2794 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2796 t3_write_reg(adap, A_TP_PARA_REG3, val);
2801 * t3_tp_set_max_rxsize - set the max receive size
2802 * @adap: the adapter
2803 * @size: the max receive size
2805 * Set TP's max receive size. This is the limit that applies when
2806 * receive coalescing is disabled.
2808 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2810 t3_write_reg(adap, A_TP_PARA_REG7,
2811 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2814 static void __devinit init_mtus(unsigned short mtus[])
2817 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2818 * it can accomodate max size TCP/IP headers when SACK and timestamps
2819 * are enabled and still have at least 8 bytes of payload.
2840 * init_cong_ctrl - initialize congestion control parameters
2841 * @a: the alpha values for congestion control
2842 * @b: the beta values for congestion control
2844 * Initialize the congestion control parameters.
2846 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2848 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2873 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2876 b[13] = b[14] = b[15] = b[16] = 3;
2877 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2878 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2883 /* The minimum additive increment value for the congestion control table */
2884 #define CC_MIN_INCR 2U
2887 * t3_load_mtus - write the MTU and congestion control HW tables
2888 * @adap: the adapter
2889 * @mtus: the unrestricted values for the MTU table
2890 * @alpha: the values for the congestion control alpha parameter
2891 * @beta: the values for the congestion control beta parameter
2892 * @mtu_cap: the maximum permitted effective MTU
2894 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2895 * Update the high-speed congestion control table with the supplied alpha,
2898 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2899 unsigned short alpha[NCCTRL_WIN],
2900 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2902 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2903 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2904 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2905 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2909 for (i = 0; i < NMTUS; ++i) {
2910 unsigned int mtu = min(mtus[i], mtu_cap);
2911 unsigned int log2 = fls(mtu);
2913 if (!(mtu & ((1 << log2) >> 2))) /* round */
2915 t3_write_reg(adap, A_TP_MTU_TABLE,
2916 (i << 24) | (log2 << 16) | mtu);
2918 for (w = 0; w < NCCTRL_WIN; ++w) {
2921 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2924 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2925 (w << 16) | (beta[w] << 13) | inc);
2931 * t3_read_hw_mtus - returns the values in the HW MTU table
2932 * @adap: the adapter
2933 * @mtus: where to store the HW MTU values
2935 * Reads the HW MTU table.
2937 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2941 for (i = 0; i < NMTUS; ++i) {
2944 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2945 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2946 mtus[i] = val & 0x3fff;
2951 * t3_get_cong_cntl_tab - reads the congestion control table
2952 * @adap: the adapter
2953 * @incr: where to store the alpha values
2955 * Reads the additive increments programmed into the HW congestion
2958 void t3_get_cong_cntl_tab(adapter_t *adap,
2959 unsigned short incr[NMTUS][NCCTRL_WIN])
2961 unsigned int mtu, w;
2963 for (mtu = 0; mtu < NMTUS; ++mtu)
2964 for (w = 0; w < NCCTRL_WIN; ++w) {
2965 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2966 0xffff0000 | (mtu << 5) | w);
2967 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2968 A_TP_CCTRL_TABLE) & 0x1fff;
2973 * t3_tp_get_mib_stats - read TP's MIB counters
2974 * @adap: the adapter
2975 * @tps: holds the returned counter values
2977 * Returns the values of TP's MIB counters.
2979 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
2981 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
2982 sizeof(*tps) / sizeof(u32), 0);
2986 * t3_read_pace_tbl - read the pace table
2987 * @adap: the adapter
2988 * @pace_vals: holds the returned values
2990 * Returns the values of TP's pace table in nanoseconds.
2992 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
2994 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
2996 for (i = 0; i < NTX_SCHED; i++) {
2997 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2998 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3003 * t3_set_pace_tbl - set the pace table
3004 * @adap: the adapter
3005 * @pace_vals: the pace values in nanoseconds
3006 * @start: index of the first entry in the HW pace table to set
3007 * @n: how many entries to set
3009 * Sets (a subset of the) HW pace table.
3011 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3012 unsigned int start, unsigned int n)
3014 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3016 for ( ; n; n--, start++, pace_vals++)
3017 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3018 ((*pace_vals + tick_ns / 2) / tick_ns));
/* Program an ULP RX memory region and advance the running cursor. */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Program an ULP TX memory region; does not advance the cursor. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
3032 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3034 unsigned int m = p->chan_rx_size;
3036 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3037 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3038 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3039 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3040 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3041 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3042 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3043 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3048 * t3_set_proto_sram - set the contents of the protocol sram
3049 * @adapter: the adapter
3050 * @data: the protocol image
3052 * Write the contents of the protocol SRAM.
3054 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3057 const u32 *buf = (const u32 *)data;
3059 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3060 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3061 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3062 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3063 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3064 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3066 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3067 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3075 * t3_config_trace_filter - configure one of the tracing filters
3076 * @adapter: the adapter
3077 * @tp: the desired trace filter parameters
3078 * @filter_index: which filter to configure
3079 * @invert: if set non-matching packets are traced instead of matching ones
3080 * @enable: whether to enable or disable the filter
3082 * Configures one of the tracing filters available in HW.
3084 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3085 int filter_index, int invert, int enable)
3087 u32 addr, key[4], mask[4];
3089 key[0] = tp->sport | (tp->sip << 16);
3090 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3092 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3094 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3095 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3096 mask[2] = tp->dip_mask;
3097 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3100 key[3] |= (1 << 29);
3102 key[3] |= (1 << 28);
3104 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3105 tp_wr_indirect(adapter, addr++, key[0]);
3106 tp_wr_indirect(adapter, addr++, mask[0]);
3107 tp_wr_indirect(adapter, addr++, key[1]);
3108 tp_wr_indirect(adapter, addr++, mask[1]);
3109 tp_wr_indirect(adapter, addr++, key[2]);
3110 tp_wr_indirect(adapter, addr++, mask[2]);
3111 tp_wr_indirect(adapter, addr++, key[3]);
3112 tp_wr_indirect(adapter, addr, mask[3]);
3113 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3117 * t3_config_sched - configure a HW traffic scheduler
3118 * @adap: the adapter
3119 * @kbps: target rate in Kbps
3120 * @sched: the scheduler index
3122 * Configure a Tx HW scheduler for the target rate.
3124 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3126 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3127 unsigned int clk = adap->params.vpd.cclk * 1000;
3128 unsigned int selected_cpt = 0, selected_bpt = 0;
3131 kbps *= 125; /* -> bytes */
3132 for (cpt = 1; cpt <= 255; cpt++) {
3134 bpt = (kbps + tps / 2) / tps;
3135 if (bpt > 0 && bpt <= 255) {
3137 delta = v >= kbps ? v - kbps : kbps - v;
3138 if (delta < mindelta) {
3143 } else if (selected_cpt)
3149 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3150 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3151 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3153 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3155 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3156 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3161 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3162 * @adap: the adapter
3163 * @sched: the scheduler index
3164 * @ipg: the interpacket delay in tenths of nanoseconds
3166 * Set the interpacket delay for a HW packet rate scheduler.
3168 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3170 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3172 /* convert ipg to nearest number of core clocks */
3173 ipg *= core_ticks_per_usec(adap);
3174 ipg = (ipg + 5000) / 10000;
3178 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3179 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3181 v = (v & 0xffff) | (ipg << 16);
3183 v = (v & 0xffff0000) | ipg;
3184 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3185 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3190 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3191 * @adap: the adapter
3192 * @sched: the scheduler index
3193 * @kbps: the byte rate in Kbps
3194 * @ipg: the interpacket delay in tenths of nanoseconds
3196 * Return the current configuration of a HW Tx scheduler.
3198 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3201 unsigned int v, addr, bpt, cpt;
3204 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3205 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3206 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3209 bpt = (v >> 8) & 0xff;
3212 *kbps = 0; /* scheduler disabled */
3214 v = (adap->params.vpd.cclk * 1000) / cpt;
3215 *kbps = (v * bpt) / 125;
3219 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3220 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3221 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3225 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3230 * tp_init - configure TP
3231 * @adap: the adapter
3232 * @p: TP configuration parameters
3234 * Initializes the TP HW module.
3236 static int tp_init(adapter_t *adap, const struct tp_params *p)
3241 t3_set_vlan_accel(adap, 3, 0);
3243 if (is_offload(adap)) {
3244 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3245 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3246 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3249 CH_ERR(adap, "TP initialization timed out\n");
3253 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3258 * t3_mps_set_active_ports - configure port failover
3259 * @adap: the adapter
3260 * @port_mask: bitmap of active ports
3262 * Sets the active ports according to the supplied bitmap.
3264 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3266 if (port_mask & ~((1 << adap->params.nports) - 1))
3268 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3269 port_mask << S_PORT0ACTIVE);
3274 * chan_init_hw - channel-dependent HW initialization
3275 * @adap: the adapter
3276 * @chan_map: bitmap of Tx channels being used
3278 * Perform the bits of HW initialization that are dependent on the Tx
3279 * channels being used.
/*
 * Channel-dependent HW init: chan_map == 3 means both Tx channels are in
 * use; any other value means a single channel (1 -> channel 0, else
 * channel 1).  Declaration of loop index 'i' is elided in this extract.
 */
3281 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3285 	if (chan_map != 3) {                                 /* one channel */
		/* Single channel: no round-robin arbitration needed. */
3286 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3287 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3288 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3289 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3290 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3291 		t3_write_reg(adap, A_PM1_TX_CFG,
3292 			     chan_map == 1 ? 0xffffffff : 0);
		/* Map all Tx modulation queues to the single active channel. */
3294 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3295 			     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3296 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3297 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3298 	} else {                                     /* two channels */
		/* Two channels: round-robin between them with equal DMA weights. */
3299 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3300 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3301 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3302 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3303 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3304 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
		/* Split PM1 Tx memory evenly between the two channels. */
3306 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3307 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		/* Alternate modulation queues across channels (0xaa bit pattern). */
3308 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3309 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3310 		for (i = 0; i < 16; i++)
3311 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3312 				     (i << 16) | 0x1010);
3313 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3314 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
/*
 * Calibrate the XGMAC analog blocks.  XAUI boards retry the impedance
 * calibration up to 5 times; RGMII boards just program fixed pull-up/down
 * impedance values.  Locals (i, v), delay between attempts, and the
 * success/error return lines are elided in this extract.
 */
3318 static int calibrate_xgm(adapter_t *adapter)
3320 	if (uses_xaui(adapter)) {
3323 		for (i = 0; i < 5; ++i) {
			/* Trigger calibration; the dummy read flushes the posted write. */
3324 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3325 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3327 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			/* Done when neither fault nor busy is set; latch the result. */
3328 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3329 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3330 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3334 		CH_ERR(adapter, "MAC calibration failed\n");
3337 	t3_write_reg(adapter, A_XGM_RGMII_IMP,
3338 		     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3339 	t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3340 			 F_XGM_IMPSETUPDATE);
/*
 * T3B-revision variant of the XGMAC calibration: only the RGMII path needs
 * SW intervention here; it pulses CALRESET, then toggles IMPSETUPDATE and
 * CALUPDATE to latch the programmed impedance values.
 */
3345 static void calibrate_xgm_t3b(adapter_t *adapter)
3347 	if (!uses_xaui(adapter)) {
3348 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3349 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		/* Release calibration reset, then pulse the update strobes. */
3350 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3351 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3352 				 F_XGM_IMPSETUPDATE);
3353 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3355 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3356 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * Memory timing parameters for one MC7 speed grade (see the mc7_timings[]
 * table in mc7_init).  RefCyc[] is indexed by the memory density field
 * (G_DEN) read from the MC7 configuration register.
 */
3360 struct mc7_timing_params {
3361 	unsigned char ActToPreDly;	/* active-to-precharge delay */
3362 	unsigned char ActToRdWrDly;	/* active-to-read/write delay */
3363 	unsigned char PreCyc;	/* precharge cycles */
3364 	unsigned char RefCyc[5];	/* refresh cycles, per density */
3365 	unsigned char BkCyc;	/* bank cycles */
3366 	unsigned char WrToRdDly;	/* write-to-read delay */
3367 	unsigned char RdToWrDly;	/* read-to-write delay */
3371 * Write a value to a register and check that the write completed. These
3372 * writes normally complete in a cycle or two, so one read should suffice.
3373 * The very first read exists to flush the posted write to the device.
/*
 * Write @val to an MC7 register and poll once for completion (BUSY clear);
 * the intervening delay, the success return, and the error return are
 * elided in this extract.  Returns non-zero on timeout (see CH_ERR below).
 */
3375 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3377 	t3_write_reg(adapter, addr, val);
3378 	(void) t3_read_reg(adapter, addr);       /* flush */
3379 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3381 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Bring up one MC7 memory controller: calibrate, program timing from the
 * mc7_timings[mem_type] table, run the DRAM init sequence, enable periodic
 * refresh and ECC, BIST-clear the whole memory, then mark it ready.
 * NOTE(review): several source lines (locals, delays, goto-out error
 * paths, return) are elided in this extract.
 */
3385 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
	/* Mode-register values, indexed by mem_type (same index as mc7_timings). */
3387 	static const unsigned int mc7_mode[] = {
3388 		0x632, 0x642, 0x652, 0x432, 0x442
3390 	static const struct mc7_timing_params mc7_timings[] = {
3391 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3392 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3393 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3394 		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3395 		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3399 	unsigned int width, density, slow, attempts;
3400 	adapter_t *adapter = mc7->adapter;
3401 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
	/* Decode the attached memory from the MC7 configuration register. */
3406 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3407 	slow = val & F_SLOW;
3408 	width = G_WIDTH(val);
3409 	density = G_DEN(val);
	/* Enable the memory interface; the read-back flushes the posted write. */
3411 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3412 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
	/* Single-shot calibration (skipped on 'slow' parts — guard elided here). */
3416 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3417 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3419 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3420 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3421 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
	/* Program DRAM timing from the selected speed-grade entry. */
3427 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3428 		     V_ACTTOPREDLY(p->ActToPreDly) |
3429 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3430 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3431 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3433 		t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3434 			     val | F_CLKEN | F_TERM150);
3435 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3438 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
	/* DRAM init sequence part 1: precharge and extended mode registers. */
3443 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3444 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3445 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3446 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
	/* Part 2: DLL reset via mode register, refresh cycles, final mode. */
3450 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3451 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3456 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3457 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3458 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3459 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3460 		       mc7_mode[mem_type]) ||
3461 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3462 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
	/* Convert the clock to a periodic-refresh divisor. */
3465 	/* clock value is in KHz */
3466 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;     /* ns */
3467 	mc7_clock /= 1000000;                         /* KHz->MHz, ns->us */
3469 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3470 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3471 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
	/* Enable ECC, then BIST-write zeros across the entire memory. */
3473 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3474 		     F_ECCGENEN | F_ECCCHKEN);
3475 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3476 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3477 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3478 		     (mc7->size << width) - 1);
3479 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3480 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
	/* Poll until the BIST engine finishes or attempts are exhausted. */
3485 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3486 	} while ((val & F_BUSY) && --attempts);
3488 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3492 	/* Enable normal memory accesses. */
3493 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ack latency and replay-timer limits from the negotiated link
 * width and max payload size, then clear latched PEX errors and enable
 * DMA-stop/CLI-decode.  Both tables are indexed
 * [log2(link width)][payload-size code].  Locals and the else-branch for
 * the non-T3A ACKLAT write are partially elided in this extract.
 */
3500 static void config_pcie(adapter_t *adap)
3502 	static const u16 ack_lat[4][6] = {
3503 		{ 237, 416, 559, 1071, 2095, 4143 },
3504 		{ 128, 217, 289, 545, 1057, 2081 },
3505 		{ 73, 118, 154, 282, 538, 1050 },
3506 		{ 67, 107, 86, 150, 278, 534 }
3508 	static const u16 rpl_tmr[4][6] = {
3509 		{ 711, 1248, 1677, 3213, 6285, 12429 },
3510 		{ 384, 651, 867, 1635, 3171, 6243 },
3511 		{ 219, 354, 462, 846, 1614, 3150 },
3512 		{ 201, 321, 258, 450, 834, 1602 }
3516 	unsigned int log2_width, pldsize;
3517 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
	/* Max payload size code from the PCIe Device Control register. */
3519 	t3_os_pci_read_config_2(adap,
3520 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3522 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3524 	t3_os_pci_read_config_2(adap,
3525 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
	/* Fast-training sequence counts; rev 0 has no separate Rx count. */
3528 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3529 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3530 	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3531 	log2_width = fls(adap->params.pci.width) - 1;
3532 	acklat = ack_lat[log2_width][pldsize];
3533 	if (val & 1)                            /* check LOsEnable */
3534 		acklat += fst_trn_tx * 4;
3535 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
	/* Rev 0 (T3A) uses differently-placed ACKLAT fields. */
3537 	if (adap->params.rev == 0)
3538 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3539 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3540 				 V_T3A_ACKLAT(acklat));
3542 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3545 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3546 			 V_REPLAYLMT(rpllmt));
	/* Clear any latched PEX errors, then enable DMA stop + CLI decode. */
3548 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3549 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3550 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3554 * t3_init_hw - initialize and configure T3 HW modules
3555 * @adapter: the adapter
3556 * @fw_params: initial parameters to pass to firmware (optional)
3558 * Initialize and configure T3 HW modules. This performs the
3559 * initialization steps that need to be done once after a card is reset.
3560 * MAC and PHY initialization is handled separarely whenever a port is
3563 * @fw_params are passed to FW and their value is platform dependent.
3564 * Only the top 8 bits are available for use, the rest must be 0.
/*
 * One-time post-reset HW bring-up: calibrate MACs, partition and init the
 * MC7 memories and MC5 TCAM, init TP/SGE/PM, configure the PCI(e) core,
 * then boot the embedded uP from flash and wait for it to come up.
 * NOTE(review): goto-out error paths, offload-only guards, attempt counts
 * and the return are elided in this extract.
 */
3566 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3568 	int err = -EIO, attempts, i;
3569 	const struct vpd_params *vpd = &adapter->params.vpd;
	/* Rev 0 needs the full SW calibration; later revs use the T3B variant. */
3571 	if (adapter->params.rev > 0)
3572 		calibrate_xgm_t3b(adapter);
3573 	else if (calibrate_xgm(adapter))
3576 	if (adapter->params.nports > 2)
3577 		t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3580 		partition_mem(adapter, &adapter->params.tp);
	/* Bring up all three MC7 memories and the MC5 TCAM. */
3582 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3583 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3584 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3585 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3586 				adapter->params.mc5.nfilters,
3587 				adapter->params.mc5.nroutes))
	/* Scrub the first 32 SGE CQ contexts. */
3590 		for (i = 0; i < 32; i++)
3591 			if (clear_sge_ctxt(adapter, i, F_CQ))
3595 	if (tp_init(adapter, &adapter->params.tp))
3598 #ifdef CONFIG_CHELSIO_T3_CORE
3599 	t3_tp_set_coalescing_size(adapter,
3600 				  min(adapter->params.sge.max_pkt_size,
3601 				      MAX_RX_COALESCING_LEN), 1);
3602 	t3_tp_set_max_rxsize(adapter,
3603 			     min(adapter->params.sge.max_pkt_size, 16384U));
3604 	ulp_config(adapter, &adapter->params.tp);
	/* Bus-specific tuning: PCIe gets config_pcie(), PCI-X gets the fields. */
3606 	if (is_pcie(adapter))
3607 		config_pcie(adapter);
3609 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3610 				 F_DMASTOPEN | F_CLIDECEN);
3612 	if (adapter->params.rev == T3_REV_C)
3613 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3614 				 F_CFG_CQE_SOP_MASK);
3616 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3617 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3618 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3619 	chan_init_hw(adapter, adapter->params.chan_map);
3620 	t3_sge_init(adapter, &adapter->params.sge);
	/* Hand fw_params (top 8 bits only, per the API doc above) + uclk to FW
	 * and boot the uP from flash. */
3622 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3623 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3624 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3625 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
	/* The uP clears A_CIM_HOST_ACC_DATA when it has initialized. */
3628 	do { /* wait for uP to initialize */
3630 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3632 		CH_ERR(adapter, "uP initialization timed out\n");
3642 * get_pci_mode - determine a card's PCI mode
3643 * @adapter: the adapter
3644 * @p: where to store the PCI settings
3646 * Determines a card's PCI mode and associated parameters, such as speed
/*
 * Detect the card's bus type: if a PCIe capability exists, record its
 * address and negotiated link width; otherwise decode the PCI/PCI-X
 * variant and clock from A_PCIX_MODE.  The PCIe speed assignment and the
 * local 'val' declaration are elided in this extract.
 */
3649 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3651 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3652 	u32 pci_mode, pcie_cap;
3654 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3658 		p->variant = PCI_VARIANT_PCIE;
3659 		p->pcie_cap_addr = pcie_cap;
3660 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
		/* Negotiated link width lives in bits 9:4 of the Link Status reg. */
3662 		p->width = (val >> 4) & 0x3f;
	/* Conventional PCI / PCI-X path. */
3666 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3667 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3668 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3669 	pci_mode = G_PCIXINITPAT(pci_mode);
3671 		p->variant = PCI_VARIANT_PCI;
3672 	else if (pci_mode < 4)
3673 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3674 	else if (pci_mode < 8)
3675 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3677 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3681 * init_link_config - initialize a link's SW state
3682 * @lc: structure holding the link state
3683 * @caps: link capabilities
3685 * Initializes the SW state maintained for each link, including the link's
3686 * capabilities and default speed/duplex/flow-control/autonegotiation
/*
 * Initialize per-link SW state: everything starts invalid/auto, flow
 * control defaults to Rx+Tx pause, and autonegotiation is enabled iff the
 * PHY advertises SUPPORTED_Autoneg.
 */
3689 static void __devinit init_link_config(struct link_config *lc,
3692 	lc->supported = caps;
3693 	lc->requested_speed = lc->speed = SPEED_INVALID;
3694 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3695 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3696 	if (lc->supported & SUPPORTED_Autoneg) {
		/* Autoneg-capable: advertise everything and let pause autonegotiate. */
3697 		lc->advertising = lc->supported;
3698 		lc->autoneg = AUTONEG_ENABLE;
3699 		lc->requested_fc |= PAUSE_AUTONEG;
3701 		lc->advertising = 0;
3702 		lc->autoneg = AUTONEG_DISABLE;
3707 * mc7_calc_size - calculate MC7 memory size
3708 * @cfg: the MC7 configuration
3710 * Calculates the size of an MC7 memory in bytes from the value of its
3711 * configuration register.
/*
 * Derive an MC7 memory's size from its configuration register fields
 * (width, banks, organization, density).  Computes megabytes; the final
 * byte-count return line is elided in this extract.
 */
3713 static unsigned int __devinit mc7_calc_size(u32 cfg)
3715 	unsigned int width = G_WIDTH(cfg);
3716 	unsigned int banks = !!(cfg & F_BKS) + 1;
3717 	unsigned int org = !!(cfg & F_ORG) + 1;
3718 	unsigned int density = G_DEN(cfg);
3719 	unsigned int MBs = ((256 << density) * banks) / (org << width);
/*
 * Set up SW state for one MC7 controller: register offset relative to the
 * PMRX instance, plus size/width decoded from its config register.  A
 * density of M_DEN (all-ones) means no memory is attached (size 0).
 */
3724 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3725 			       unsigned int base_addr, const char *name)
3729 	mc7->adapter = adapter;
3731 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3732 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3733 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3734 	mc7->width = G_WIDTH(cfg);
/*
 * Set up SW state for one XGMAC.  Adapters with more than 2 ports are
 * 'multiport' (external MAC per port); otherwise each MAC gets a register
 * offset scaled by its index.  Rev-0 XAUI parts also need a SERDES poke.
 */
3737 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3739 	mac->adapter = adapter;
3740 	mac->multiport = adapter->params.nports > 2;
3741 	if (mac->multiport) {
3742 		mac->ext_port = (unsigned char)index;
3748 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	/* Rev 0 + XAUI: program SERDES control (value differs for 10G vs 1G). */
3750 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3751 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3752 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3753 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3759 * early_hw_init - HW initialization done at card detection time
3760 * @adapter: the adapter
3761 * @ai: contains information about the adapter type and properties
3763 * Perfoms the part of HW initialization that is done early on when the
3764 * driver first detecs the card. Most of the HW state is initialized
3765 * lazily later on when a port or an offload function are first used.
/*
 * Minimal HW init at card-detection time: MDIO, I2C clock, GPIOs, and —
 * for rev >0 XAUI parts — enabling the MAC clocks so XGMAC registers can
 * be accessed before full init.  Most state is initialized lazily later.
 */
3767 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3769 	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3772 	mi1_init(adapter, ai);
	/* cclk is in kHz here, giving an ~80 kHz I2C clock divisor. */
3773 	t3_write_reg(adapter, A_I2C_CFG,  /* set for 80KHz */
3774 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3775 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3776 		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3777 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3778 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
	/* Rev 0 or non-XAUI parts skip the clock-enable dance below. */
3780 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3783 	/* Enable MAC clocks so we can access the registers */
3784 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3785 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3787 	val |= F_CLKDIVRESET_;
3788 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3789 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3790 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3791 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3795 * t3_reset_adapter - reset the adapter
3796 * @adapter: the adapter
3798 * Reset the adapter.
/*
 * Warm-reset the chip via PL_RST, then poll PCI config space until the
 * Chelsio device ID (0x1425) reappears.  Pre-B2 PCIe parts lose their PCIe
 * config on reset, so it is saved/restored around the reset.  The 'devid'
 * declaration, per-iteration delay, and returns are elided in this extract.
 */
3800 static int t3_reset_adapter(adapter_t *adapter)
3802 	int i, save_and_restore_pcie =
3803 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3806 	if (save_and_restore_pcie)
3807 		t3_os_pci_save_state(adapter);
3808 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3811 	 * Delay. Give Some time to device to reset fully.
3812 	 * XXX The delay time should be modified.
	/* Wait for the chip to respond with its vendor ID again. */
3814 	for (i = 0; i < 10; i++) {
3816 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
3817 		if (devid == 0x1425)
3821 	if (devid != 0x1425)
3824 	if (save_and_restore_pcie)
3825 		t3_os_pci_restore_state(adapter);
/*
 * Initialize parity-protected state: clear the SGE egress contexts
 * (0..15 and the 0xfff0..0xffff control range) and response-queue
 * contexts, then zero the CIM IBQ debug memory for all four queues so
 * later parity checks start from known-good contents.
 */
3829 static int __devinit init_parity(adapter_t *adap)
	/* Can't issue context commands while one is already in flight. */
3833 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3836 	for (err = i = 0; !err && i < 16; i++)
3837 		err = clear_sge_ctxt(adap, i, F_EGRESS);
3838 	for (i = 0xfff0; !err && i <= 0xffff; i++)
3839 		err = clear_sge_ctxt(adap, i, F_EGRESS);
3840 	for (i = 0; !err && i < SGE_QSETS; i++)
3841 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	/* Write zeros through the IBQ debug interface, one word per address. */
3845 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3846 	for (i = 0; i < 4; i++)
3847 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3848 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3849 				     F_IBQDBGWR | V_IBQDBGQID(i) |
3850 				     V_IBQDBGADDR(addr));
3851 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3852 					      F_IBQDBGBUSY, 0, 2, 1);
3860 * t3_prep_adapter - prepare SW and HW for operation
3861 * @adapter: the adapter
3862 * @ai: contains information about the adapter type and properties
3864 * Initialize adapter SW state for the various HW modules, set initial
3865 * values for some adapter tunables, take PHYs out of reset, and
3866 * initialize the MDIO interface.
/*
 * Prepare adapter SW state after probe: read PCI/VPD parameters,
 * optionally reset the chip, size the MC7 memories and derive the TP
 * memory-partition parameters, prep the MC5, do early HW init, and set up
 * each port's PHY, MAC, Ethernet address and link config.
 * NOTE(review): error-return paths and some guards are elided in this
 * extract.
 */
3868 int __devinit t3_prep_adapter(adapter_t *adapter,
3869 			      const struct adapter_info *ai, int reset)
3872 	unsigned int i, j = 0;
3874 	get_pci_mode(adapter, &adapter->params.pci);
3876 	adapter->params.info = ai;
3877 	adapter->params.nports = ai->nports0 + ai->nports1;
	/* chan_map bit i set iff Tx channel i has ports behind it. */
3878 	adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3879 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3880 	adapter->params.linkpoll_period = 0;
	/* >2 ports implies external (VSC) MACs with their own stats cadence. */
3881 	if (adapter->params.nports > 2)
3882 		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3884 		adapter->params.stats_update_period = is_10G(adapter) ?
3885 			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3886 	adapter->params.pci.vpd_cap_addr =
3887 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3889 	ret = get_vpd_params(adapter, &adapter->params.vpd);
3893 	if (reset && t3_reset_adapter(adapter))
3896 	t3_sge_prep(adapter, &adapter->params.sge);
	/* mclk != 0 means on-board memory is present: size it and derive the
	 * TP memory-partition parameters. */
3898 	if (adapter->params.vpd.mclk) {
3899 		struct tp_params *p = &adapter->params.tp;
3901 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3902 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3903 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3905 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3906 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3907 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3908 		p->cm_size = t3_mc7_size(&adapter->cm);
3909 		p->chan_rx_size = p->pmrx_size / 2;  /* only 1 Rx channel */
3910 		p->chan_tx_size = p->pmtx_size / p->nchan;
3911 		p->rx_pg_size = 64 * 1024;
3912 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3913 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3914 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3915 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3916 			       adapter->params.rev > 0 ? 12 : 6;
3917 		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
		/* dack_re: delayed-ACK timer resolution, ~100us per the comment. */
3919 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
	/* Offload requires all three memories to be present. */
3922 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3923 				  t3_mc7_size(&adapter->pmtx) &&
3924 				  t3_mc7_size(&adapter->cm);
3926 	if (is_offload(adapter)) {
3927 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* Rev 0 silicon gets no filters. */
3928 		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3929 			DEFAULT_NFILTERS : 0;
3930 		adapter->params.mc5.nroutes = 0;
3931 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3933 #ifdef CONFIG_CHELSIO_T3_CORE
3934 		init_mtus(adapter->params.mtus);
3935 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3939 	early_hw_init(adapter, ai);
3940 	ret = init_parity(adapter);
	/* >2 ports: bring up the external VSC7323 switch/MAC first. */
3944 	if (adapter->params.nports > 2 &&
3945 	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
3948 	for_each_port(adapter, i) {
3950 		const struct port_type_info *pti;
3951 		struct port_info *p = adap2pinfo(adapter, i);
		/* j walks the VPD port-type table, skipping unpopulated slots. */
3953 		while (!adapter->params.vpd.port_type[j])
3956 		pti = &port_types[adapter->params.vpd.port_type[j]];
3957 		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3961 		mac_prep(&p->mac, adapter, j);
3965 		 * The VPD EEPROM stores the base Ethernet address for the
3966 		 * card.  A port's address is derived from the base by adding
3967 		 * the port's index to the base's low octet.
3969 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3970 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3972 		t3_os_set_hw_addr(adapter, i, hw_addr);
3973 		init_link_config(&p->link_config, p->phy.caps);
3974 		p->phy.ops->power_down(&p->phy, 1);
		/* PHYs without interrupt support fall back to 10-unit link polling. */
3975 		if (!(p->phy.caps & SUPPORTED_IRQ))
3976 			adapter->params.linkpoll_period = 10;
/* Turn on the "driver ready" indication by clearing GPIO0's output value
 * (the replacement field value line is elided in this extract). */
3982 void t3_led_ready(adapter_t *adapter)
3984 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/* Fail over to @port: make only that port's ACTIVE bit set in MPS_CFG. */
3988 void t3_port_failover(adapter_t *adapter, int port)
3992 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
3993 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
/* Failover complete: mark both ports active again in MPS_CFG. */
3997 void t3_failover_done(adapter_t *adapter, int port)
3999 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4000 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
/* Clear any failover state: both ports active (same write as
 * t3_failover_done, but without a port argument). */
4003 void t3_failover_clear(adapter_t *adapter)
4005 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4006 			 F_PORT0ACTIVE | F_PORT1ACTIVE);