1 /**************************************************************************
3 Copyright (c) 2007-2008, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include <cxgb_include.h>
37 #define msleep t3_os_sleep
/*
 * NOTE(review): this extraction has gaps — the embedded original line
 * numbers jump (e.g. 55 -> 58 -> 60), so interior statements of this
 * function (polling loop, delay, return paths) are missing here.
 * Comments only; surviving tokens left byte-identical.
 */
40 * t3_wait_op_done_val - wait until an operation is completed
41 * @adapter: the adapter performing the operation
42 * @reg: the register to check for completion
43 * @mask: a single-bit field within @reg that indicates completion
44 * @polarity: the value of the field when the operation is completed
45 * @attempts: number of check iterations
46 * @delay: delay in usecs between iterations
47 * @valp: where to store the value of the register at completion time
49 * Wait until an operation is completed by checking a bit in a register
50 * up to @attempts times. If @valp is not NULL the value of the register
51 * at the time it indicated completion is stored there. Returns 0 if the
52 * operation completes and -EAGAIN otherwise.
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 int attempts, int delay, u32 *valp)
58 u32 val = t3_read_reg(adapter, reg);
/* !!(...) normalizes the masked field to 0/1 before comparing to @polarity */
60 if (!!(val & mask) == polarity) {
/*
 * NOTE(review): extraction gap — embedded line numbers jump (83 -> 87),
 * so the function's opening brace and loop header are missing; only the
 * per-entry write statement survives. Comments only.
 */
73 * t3_write_regs - write a bunch of registers
74 * @adapter: the adapter to program
75 * @p: an array of register address/register value pairs
76 * @n: the number of address/value pairs
77 * @offset: register address offset
79 * Takes an array of register address/register value pairs and writes each
80 * value to the corresponding register. Register addresses are adjusted
81 * by the supplied offset.
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
/* each entry's address is biased by the caller-supplied offset */
87 t3_write_reg(adapter, p->reg_addr + offset, p->val);
/*
 * NOTE(review): extraction gap — some interior lines are missing
 * (line numbers jump 102 -> 104). Comments only.
 */
93 * t3_set_reg_field - set a register field to a value
94 * @adapter: the adapter to program
95 * @addr: the register address
96 * @mask: specifies the portion of the register to modify
97 * @val: the new value for the register field
99 * Sets a register field specified by the supplied mask to the
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
/* read-modify-write: clear the masked field, then OR in the new value */
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 (void) t3_read_reg(adapter, addr); /* flush */
/*
 * NOTE(review): extraction gap — the loop construct around the
 * write-address/read-data pair is missing (line numbers jump
 * 124 -> 127). Comments only.
 */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
/* select the indirect index, then latch its value from the data register */
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
/*
 * NOTE(review): extraction gap — numerous interior lines are missing
 * (declarations, outer loop, error returns; line numbers jump 150 -> 152,
 * 155 -> 160, 170 -> 174, 178 -> 181). Comments only; tokens untouched.
 */
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
/* per-width shift/step tables, indexed by mc7->width — TODO confirm width range is 0..3 */
146 static int shift[] = { 0, 0, 16, 24 };
147 static int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 adapter_t *adap = mc7->adapter;
/* bounds check against the MC7 size expressed in 64-bit words */
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
166 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
/* poll the BD_OP register until F_BUSY clears or attempts run out */
167 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
168 while ((val & F_BUSY) && attempts--)
169 val = t3_read_reg(adap,
170 mc7->offset + A_MC7_BD_OP);
174 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
/* width 0: a full 64-bit word comes from DATA0 (low) + DATA1 (high) */
175 if (mc7->width == 0) {
176 val64 = t3_read_reg(adap,
177 mc7->offset + A_MC7_BD_DATA0);
178 val64 |= (u64)val << 32;
/* wider configs: extract the per-lane field and place it by step*i */
181 val >>= shift[mc7->width];
182 val64 |= (u64)val << (step[mc7->width] * i);
/*
 * Initialize the MI1 (MDIO) interface configuration register.
 * NOTE(review): extraction gap — lines 198-201 of the original are
 * missing (the 10G clause-45 adjustment branch body). Comments only.
 */
194 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
/* derive the MDIO clock divider from core clock and target MDC frequency */
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
202 t3_write_reg(adap, A_MI1_CFG, val);
205 #define MDIO_ATTEMPTS 20
/*
 * MI1 read for directly addressed (clause 22) PHYs: latch the
 * register/PHY address, issue a read op (V_MDI_OP(2)), wait for
 * completion, then fetch the data word.
 * NOTE(review): extraction gap — the MDIO_LOCK acquisition and the
 * ret check before reading A_MI1_DATA are missing from this view.
 */
208 * MI1 read/write operations for direct-addressed PHYs.
210 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 MDIO_UNLOCK(adapter);
/*
 * MI1 write for directly addressed (clause 22) PHYs: set address and
 * data registers, issue a write op (V_MDI_OP(1)) and wait for F_BUSY
 * to clear.
 * NOTE(review): extraction gap — MDIO_LOCK and the return statement
 * are missing from this view. Comments only.
 */
229 static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
243 MDIO_UNLOCK(adapter);
/* mdio_ops vtable for direct-addressed PHYs; initializer body missing from this extraction — presumably { mi1_read, mi1_write } plus lock fields, verify against upstream. */
247 static struct mdio_ops mi1_mdio_ops = {
/*
 * MI1 read for indirectly addressed (clause 45) PHYs: first write the
 * in-device register address (V_MDI_OP(0) address cycle), then issue
 * an indirect read (V_MDI_OP(3)) and fetch the data.
 * NOTE(review): extraction gap — MDIO_LOCK and the intermediate
 * ret checks are missing from this view. Comments only.
 */
253 * MI1 read/write operations for indirect-addressed PHYs.
255 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
/* clause-45 addressing: MMD (device) id goes in the REGADDR field */
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
273 MDIO_UNLOCK(adapter);
/*
 * MI1 write for indirectly addressed (clause 45) PHYs: address cycle
 * (V_MDI_OP(0)) followed by a data write cycle (V_MDI_OP(1)).
 * NOTE(review): extraction gap — MDIO_LOCK, ret checks and the return
 * are missing from this view. Comments only.
 */
277 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
294 MDIO_UNLOCK(adapter);
/* mdio_ops vtable for indirect (clause 45) PHYs; initializer body missing from this extraction — presumably { mi1_ext_read, mi1_ext_write }, verify against upstream. */
298 static struct mdio_ops mi1_mdio_ext_ops = {
/*
 * NOTE(review): extraction gap — the @clear application (val &= ~clear)
 * and the error checks between the read and write are missing
 * (line numbers jump 320 -> 323). Comments only.
 */
304 * t3_mdio_change_bits - modify the value of a PHY register
305 * @phy: the PHY to operate on
306 * @mmd: the device address
307 * @reg: the register address
308 * @clear: what part of the register value to mask off
309 * @set: what part of the register value to set
311 * Changes the value of a PHY register by applying a mask to its current
312 * value and ORing the result with a new value.
314 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
320 ret = mdio_read(phy, mmd, reg, &val);
323 ret = mdio_write(phy, mmd, reg, val | set);
/*
 * NOTE(review): extraction gap — the wait/sleep loop body and final
 * return are missing (line numbers jump 348 -> 354). Comments only.
 */
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
338 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
/* set BMCR_RESET (and clear power-down) to kick off the reset */
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
/* poll BMCR until the self-clearing reset bit drops or @wait expires */
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
354 } while (ctl && --wait);
/*
 * NOTE(review): extraction gap — error checks after the reads/writes
 * and the reset of val before building the 10/100 advertisement are
 * missing (line numbers jump 382 -> 387). Comments only.
 */
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
367 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
370 unsigned int val = 0;
/* gigabit halves/fulls live in MII_CTRL1000; preserve unrelated bits */
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
/* 10/100 and pause capabilities go in MII_ADVERTISE */
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
/*
 * NOTE(review): extraction shows minor gaps (opening brace line
 * missing), but the visible mapping of ADVERTISED_* capability bits
 * to 1000BASE-X advertisement bits appears complete. Comments only.
 */
403 * t3_phy_advertise_fiber - set fiber PHY advertisement register
404 * @phy: the PHY to operate on
405 * @advert: bitmap of capabilities the PHY should advertise
407 * Sets a fiber PHY's advertisement register to advertise the
408 * requested capabilities.
410 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
412 unsigned int val = 0;
414 if (advert & ADVERTISED_1000baseT_Half)
415 val |= ADVERTISE_1000XHALF;
416 if (advert & ADVERTISED_1000baseT_Full)
417 val |= ADVERTISE_1000XFULL;
418 if (advert & ADVERTISED_Pause)
419 val |= ADVERTISE_1000XPAUSE;
420 if (advert & ADVERTISED_Asym_Pause)
421 val |= ADVERTISE_1000XPSE_ASYM;
422 return mdio_write(phy, 0, MII_ADVERTISE, val);
/*
 * NOTE(review): extraction gap — the error check after the BMCR read
 * and the speed>=0 / duplex>=0 guards around the two modification
 * branches are missing (line numbers jump 448 -> 451). Comments only.
 */
426 * t3_set_phy_speed_duplex - force PHY speed and duplex
427 * @phy: the PHY to operate on
428 * @speed: requested PHY speed
429 * @duplex: requested PHY duplex
431 * Force a 10/100/1000 PHY's speed and duplex. This also disables
432 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
434 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
439 err = mdio_read(phy, 0, MII_BMCR, &ctl)
444 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
445 if (speed == SPEED_100)
446 ctl |= BMCR_SPEED100;
447 else if (speed == SPEED_1000)
448 ctl |= BMCR_SPEED1000;
451 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
452 if (duplex == DUPLEX_FULL)
453 ctl |= BMCR_FULLDPLX;
455 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
456 ctl |= BMCR_ANENABLE;
457 return mdio_write(phy, 0, MII_BMCR, ctl);
/* Enable the PHY's LASI (link alarm status) interrupt by writing 1 to LASI_CTRL in the PMA/PMD MMD. */
460 int t3_phy_lasi_intr_enable(struct cphy *phy)
462 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
/* Disable the PHY's LASI interrupt by clearing LASI_CTRL. */
465 int t3_phy_lasi_intr_disable(struct cphy *phy)
467 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
/* Clear a pending LASI interrupt; LASI_STAT is read-to-clear, so a read suffices (the value itself is discarded into a local). */
470 int t3_phy_lasi_intr_clear(struct cphy *phy)
474 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
/*
 * LASI interrupt handler: read LASI_STAT and report a link-change
 * cause when bit 0 is set.
 * NOTE(review): the err check between the read and the return is
 * missing from this extraction. Comments only.
 */
477 int t3_phy_lasi_intr_handler(struct cphy *phy)
480 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
484 return (status & 1) ? cphy_cause_link_change : 0;
/*
 * Per-board static configuration table: GPIO output-enable/value
 * masks, GPIO interrupt sources, link capabilities, MDIO ops and a
 * human-readable name for each supported Chelsio T3 board.
 * NOTE(review): extraction gap — the leading fields of each entry
 * (port counts etc., original lines 488/492/496/501/507) are missing,
 * so each initializer here is truncated at the front. Comments only.
 */
487 static struct adapter_info t3_adap_info[] = {
489 F_GPIO2_OEN | F_GPIO4_OEN |
490 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
491 &mi1_mdio_ops, "Chelsio PE9000" },
493 F_GPIO2_OEN | F_GPIO4_OEN |
494 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
495 &mi1_mdio_ops, "Chelsio T302" },
497 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
498 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
499 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
500 &mi1_mdio_ext_ops, "Chelsio T310" },
502 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
503 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
504 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
505 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
506 &mi1_mdio_ext_ops, "Chelsio T320" },
508 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
509 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
510 { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
511 &mi1_mdio_ops, "Chelsio T304" },
/* Bounds-checked lookup into t3_adap_info[]; out-of-range ids yield NULL. */
515 * Return the adapter_info structure with a given index. Out-of-range indices
518 const struct adapter_info *t3_get_adapter_info(unsigned int id)
520 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Dispatch record mapping a VPD port type to its PHY preparation routine. */
523 struct port_type_info {
524 int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
525 const struct mdio_ops *ops);
/*
 * PHY-prep dispatch table, indexed by the port type read from VPD
 * (see get_vpd_params). NOTE(review): the entry for index 0 (original
 * line 529) is missing from this extraction.
 */
528 static struct port_type_info port_types[] = {
530 { t3_ael1002_phy_prep },
531 { t3_vsc8211_phy_prep },
532 { t3_mv88e1xxx_phy_prep },
533 { t3_xaui_direct_phy_prep },
534 { t3_ael2005_phy_prep },
535 { t3_qt2045_phy_prep },
536 { t3_ael1006_phy_prep },
537 { t3_tn1010_phy_prep },
/* Expands to the three fields of one VPD keyword entry: 2-byte keyword tag, 1-byte length, and the data payload of the given length. */
540 #define VPD_ENTRY(name, len) \
541 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
/*
 * NOTE(review): extraction gap — the struct's opening lines (tag/id
 * fields, original lines ~545-552) are missing; only the VPD_ENTRY
 * keyword list and trailing pad survive. Comments only.
 */
544 * Partial EEPROM Vital Product Data structure. Includes only the ID and
553 VPD_ENTRY(pn, 16); /* part number */
554 VPD_ENTRY(ec, 16); /* EC level */
555 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
556 VPD_ENTRY(na, 12); /* MAC address base */
557 VPD_ENTRY(cclk, 6); /* core clock */
558 VPD_ENTRY(mclk, 6); /* mem clock */
559 VPD_ENTRY(uclk, 6); /* uP clk */
560 VPD_ENTRY(mdc, 6); /* MDIO clk */
561 VPD_ENTRY(mt, 2); /* mem timing */
562 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
563 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
564 VPD_ENTRY(port0, 2); /* PHY0 complex */
565 VPD_ENTRY(port1, 2); /* PHY1 complex */
566 VPD_ENTRY(port2, 2); /* PHY2 complex */
567 VPD_ENTRY(port3, 2); /* PHY3 complex */
568 VPD_ENTRY(rv, 1); /* csum */
569 u32 pad; /* for multiple-of-4 sizing and alignment */
572 #define EEPROM_MAX_POLL 40
573 #define EEPROM_STAT_ADDR 0x4000
574 #define VPD_BASE 0xc00
/*
 * NOTE(review): extraction gap — variable declarations, the delay in
 * the poll loop and the error return paths are missing (line numbers
 * jump 596 -> 599, 603 -> 606). Comments only.
 */
577 * t3_seeprom_read - read a VPD EEPROM location
578 * @adapter: adapter to read
579 * @addr: EEPROM address
580 * @data: where to store the read data
582 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
583 * VPD ROM capability. A zero is written to the flag bit when the
584 * addres is written to the control register. The hardware device will
585 * set the flag to 1 when 4 bytes have been read into the data register.
587 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
590 int attempts = EEPROM_MAX_POLL;
591 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* reject out-of-range or unaligned addresses (4-byte granularity) */
593 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
596 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
/* poll the VPD flag bit until the hardware signals the read is done */
599 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
600 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
602 if (!(val & PCI_VPD_ADDR_F)) {
603 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
606 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
/* VPD data is little-endian in config space */
607 *data = le32_to_cpu(*data);
/*
 * NOTE(review): extraction gap — declarations, the cpu_to_le32 data
 * conversion argument, the poll-loop delay and the return paths are
 * missing (line numbers jump 629 -> 631, 636 -> 638). Comments only.
 */
612 * t3_seeprom_write - write a VPD EEPROM location
613 * @adapter: adapter to write
614 * @addr: EEPROM address
615 * @data: value to write
617 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
618 * VPD ROM capability.
620 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
623 int attempts = EEPROM_MAX_POLL;
624 unsigned int base = adapter->params.pci.vpd_cap_addr;
626 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
629 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
/* setting PCI_VPD_ADDR_F together with the address triggers the write */
631 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
632 (u16)addr | PCI_VPD_ADDR_F);
/* the flag clears when the hardware has committed the write */
635 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
636 } while ((val & PCI_VPD_ADDR_F) && --attempts);
638 if (val & PCI_VPD_ADDR_F) {
639 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
/* Write-protect toggle implemented by writing a magic value (0xc = protect, 0 = unprotect) to the EEPROM status word. */
646 * t3_seeprom_wp - enable/disable EEPROM write protection
647 * @adapter: the adapter
648 * @enable: 1 to enable write protection, 0 to disable it
650 * Enables or disables write protection on the serial EEPROM.
652 int t3_seeprom_wp(adapter_t *adapter, int enable)
654 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/* Assumes @c is a valid hex digit; no validation is performed here — garbage in yields garbage out. */
658 * Convert a character holding a hex digit to a number.
660 static unsigned int hex2int(unsigned char c)
662 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
/*
 * NOTE(review): extraction gap — declarations, error-return checks
 * after the seeprom reads, and the else keyword introducing the
 * per-port branch are missing (line numbers jump 688 -> 693,
 * 703 -> 705). Comments only.
 */
666 * get_vpd_params - read VPD parameters from VPD EEPROM
667 * @adapter: adapter to read
668 * @p: where to store the parameters
670 * Reads card parameters stored in VPD EEPROM.
672 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
678 * Card information is normally at VPD_BASE but some early cards had
681 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
/* 0x82 is the VPD id-string tag; its presence selects VPD_BASE vs 0 */
684 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
/* read the whole vpd struct 4 bytes at a time */
686 for (i = 0; i < sizeof(vpd); i += 4) {
687 ret = t3_seeprom_read(adapter, addr + i,
688 (u32 *)((u8 *)&vpd + i));
/* clock/timing fields are stored as ASCII decimal strings */
693 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
694 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
695 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
696 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
697 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
698 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
700 /* Old eeproms didn't have port information */
701 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
702 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
703 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
/* newer EEPROMs: port types stored as single hex digits */
705 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
706 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
707 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
708 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
709 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
710 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
/* MAC base address is 6 bytes stored as 12 hex characters */
713 for (i = 0; i < 6; i++)
714 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
715 hex2int(vpd.na_data[2 * i + 1]);
/* 28-byte BIOS expansion-ROM header layout used to validate boot images in t3_load_boot(). */
719 /* BIOS boot header */
720 typedef struct boot_header_s {
721 u8 signature[2]; /* signature */
722 u8 length; /* image length (include header) */
723 u8 offset[4]; /* initialization vector */
724 u8 reserved[19]; /* reserved */
725 u8 exheader[2]; /* offset to expansion header */
/* Anonymous enum of serial-flash geometry, SPI opcodes, and firmware/boot image layout constants. NOTE(review): the `enum {` opener (original line 729) is missing from this extraction. */
728 /* serial flash and firmware constants */
730 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
731 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
732 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
734 /* flash command opcodes */
735 SF_PROG_PAGE = 2, /* program page */
736 SF_WR_DISABLE = 4, /* disable writes */
737 SF_RD_STATUS = 5, /* read status register */
738 SF_WR_ENABLE = 6, /* enable writes */
739 SF_RD_DATA_FAST = 0xb, /* read flash */
740 SF_ERASE_SECTOR = 0xd8, /* erase sector */
742 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
743 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
744 FW_MIN_SIZE = 8, /* at least version and csum */
745 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
747 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
748 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
749 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
750 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
751 BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment */
/*
 * NOTE(review): extraction gap — error-code returns for the two guard
 * checks and the final return are missing (line numbers jump
 * 770 -> 772 -> 774). Comments only.
 */
755 * sf1_read - read data from the serial flash
756 * @adapter: the adapter
757 * @byte_cnt: number of bytes to read
758 * @cont: whether another operation will be chained
759 * @valp: where to store the read data
761 * Reads up to 4 bytes of data from the serial flash. The location of
762 * the read needs to be specified prior to calling this by issuing the
763 * appropriate commands to the serial flash.
765 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
770 if (!byte_cnt || byte_cnt > 4)
/* the SF engine must be idle before queuing a new operation */
772 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
774 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
775 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
777 *valp = t3_read_reg(adapter, A_SF_DATA);
/*
 * NOTE(review): extraction gap — error returns for the guard checks
 * are missing (line numbers jump 795 -> 797 -> 799). Comments only.
 */
782 * sf1_write - write data to the serial flash
783 * @adapter: the adapter
784 * @byte_cnt: number of bytes to write
785 * @cont: whether another operation will be chained
786 * @val: value to write
788 * Writes up to 4 bytes of data to the serial flash. The location of
789 * the write needs to be specified prior to calling this by issuing the
790 * appropriate commands to the serial flash.
792 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
795 if (!byte_cnt || byte_cnt > 4)
797 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
799 t3_write_reg(adapter, A_SF_DATA, val);
/* V_OP(1) selects a write; byte count is encoded as count-1 */
800 t3_write_reg(adapter, A_SF_OP,
801 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
802 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
/*
 * NOTE(review): extraction gap — the loop wrapper, the check of the
 * status busy bit, the msleep(delay) between polls and the timeout
 * return are all missing from this view. Comments only.
 */
806 * flash_wait_op - wait for a flash operation to complete
807 * @adapter: the adapter
808 * @attempts: max number of polls of the status register
809 * @delay: delay between polls in ms
811 * Wait for a flash operation to complete by polling the status register.
813 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
/* issue SF_RD_STATUS then read one status byte back */
819 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
820 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
/*
 * NOTE(review): extraction gap — error returns, the byte_oriented
 * condition guarding the htonl conversion, and the terminating
 * sf1_write that deasserts CS are missing (line numbers jump
 * 859 -> 863). Comments only.
 */
832 * t3_read_flash - read words from serial flash
833 * @adapter: the adapter
834 * @addr: the start address for the read
835 * @nwords: how many 32-bit words to read
836 * @data: where to store the read data
837 * @byte_oriented: whether to store data as bytes or as words
839 * Read the specified number of 32-bit words from the serial flash.
840 * If @byte_oriented is set the read data is stored as a byte array
841 * (i.e., big-endian), otherwise as 32-bit words in the platform's
844 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
845 u32 *data, int byte_oriented)
849 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
/* build the 4-byte SPI command: fast-read opcode + byte-swapped address */
852 addr = swab32(addr) | SF_RD_DATA_FAST;
854 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
855 (ret = sf1_read(adapter, 1, 1, data)) != 0)
/* chain reads, keeping CONT set until the last word */
858 for ( ; nwords; nwords--, data++) {
859 ret = sf1_read(adapter, 4, nwords > 1, data);
863 *data = htonl(*data);
/*
 * NOTE(review): extraction gap — the per-chunk size computation,
 * data-pointer advance, byte-order handling and error returns inside
 * the loop are missing (line numbers jump 899 -> 901 -> 906), as is
 * the final return. Comments only.
 */
869 * t3_write_flash - write up to a page of data to the serial flash
870 * @adapter: the adapter
871 * @addr: the start address to write
872 * @n: length of data to write
873 * @data: the data to write
874 * @byte_oriented: whether to store data as bytes or as words
876 * Writes up to a page of data (256 bytes) to the serial flash starting
877 * at the given address.
878 * If @byte_oriented is set the write data is stored as a 32-bit
879 * big-endian array, otherwise in the processor's native endianess.
882 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
883 unsigned int n, const u8 *data,
888 unsigned int c, left, val, offset = addr & 0xff;
/* a program operation may not cross a 256-byte page boundary */
890 if (addr + n > SF_SIZE || offset + n > 256)
893 val = swab32(addr) | SF_PROG_PAGE;
895 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
896 (ret = sf1_write(adapter, 4, 1, val)) != 0)
899 for (left = n; left; left -= c) {
901 val = *(const u32*)data;
906 ret = sf1_write(adapter, c, c != left, val);
910 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
913 /* Read the page to verify the write succeeded */
914 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data was advanced past the written bytes, hence data - n here */
919 if (memcmp(data - n, (u8 *)buf + offset, n))
/*
 * NOTE(review): extraction gap — the t3_wait_op_done argument list
 * continuation, the error check and the return are missing
 * (line numbers jump 937 -> 942). Comments only.
 */
925 * t3_get_tp_version - read the tp sram version
926 * @adapter: the adapter
927 * @vers: where to place the version
929 * Reads the protocol sram version from sram.
931 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
935 /* Get version loaded in SRAM */
936 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0)
937 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
942 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
/*
 * NOTE(review): extraction gap — the *must_load initialization, the
 * early-return path for rev A, the error check after
 * t3_get_tp_version, the success return, and the else branch header
 * before the second CH_ERR are missing. Comments only.
 */
948 * t3_check_tpsram_version - read the tp sram version
949 * @adapter: the adapter
952 int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
956 unsigned int major, minor;
/* rev A parts are handled specially — the branch body is lost in this extraction */
958 if (adapter->params.rev == T3_REV_A)
963 ret = t3_get_tp_version(adapter, &vers);
967 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
969 major = G_TP_VERSION_MAJOR(vers);
970 minor = G_TP_VERSION_MINOR(vers);
/* exact match against the version the driver was built for */
972 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
975 if (major != TP_VERSION_MAJOR)
976 CH_ERR(adapter, "found wrong TP version (%u.%u), "
977 "driver needs version %d.%d\n", major, minor,
978 TP_VERSION_MAJOR, TP_VERSION_MINOR);
981 CH_ERR(adapter, "found wrong TP version (%u.%u), "
982 "driver compiled for version %d.%d\n", major, minor,
983 TP_VERSION_MAJOR, TP_VERSION_MINOR);
/*
 * NOTE(review): extraction gap — the size parameter doc line, local
 * declarations, the error-return inside the checksum-failure branch
 * and the success return are missing. Comments only.
 */
989 * t3_check_tpsram - check if provided protocol SRAM
990 * is compatible with this driver
991 * @adapter: the adapter
992 * @tp_sram: the firmware image to write
995 * Checks if an adapter's tp sram is compatible with the driver.
996 * Returns 0 if the versions are compatible, a negative error otherwise.
998 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1002 const u32 *p = (const u32 *)tp_sram;
/* 1's-complement sum over the image must come to all-ones */
1004 /* Verify checksum */
1005 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1006 csum += ntohl(p[i]);
1007 if (csum != 0xffffffff) {
1008 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Enum of firmware build types; enumerators (original lines 1017-1019) are missing from this extraction. */
1016 enum fw_version_type {
/* The FW version word lives at a fixed flash address (FW_VERS_ADDR). */
1022 * t3_get_fw_version - read the firmware version
1023 * @adapter: the adapter
1024 * @vers: where to place the version
1026 * Reads the FW version from flash.
1028 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1030 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
/*
 * NOTE(review): extraction gap — *must_load initialization, the error
 * check after t3_get_fw_version, the success/else returns and the
 * final else branch header are missing. Comments only.
 */
1034 * t3_check_fw_version - check if the FW is compatible with this driver
1035 * @adapter: the adapter
1037 * Checks if an adapter's FW is compatible with the driver. Returns 0
1038 * if the versions are compatible, a negative error otherwise.
1040 int t3_check_fw_version(adapter_t *adapter, int *must_load)
1044 unsigned int type, major, minor;
1047 ret = t3_get_fw_version(adapter, &vers);
1051 type = G_FW_VERSION_TYPE(vers);
1052 major = G_FW_VERSION_MAJOR(vers);
1053 minor = G_FW_VERSION_MINOR(vers);
/* compatible only on exact type+major+minor match */
1055 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1056 minor == FW_VERSION_MINOR)
1059 if (major != FW_VERSION_MAJOR)
1060 CH_ERR(adapter, "found wrong FW version(%u.%u), "
1061 "driver needs version %u.%u\n", major, minor,
1062 FW_VERSION_MAJOR, FW_VERSION_MINOR);
/* older minor: warn but presumably tolerated — TODO confirm against the lost return value */
1063 else if ((int)minor < FW_VERSION_MINOR) {
1065 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1066 "driver compiled for version %u.%u\n", major, minor,
1067 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1069 CH_WARN(adapter, "found newer FW version(%u.%u), "
1070 "driver compiled for version %u.%u\n", major, minor,
1071 FW_VERSION_MAJOR, FW_VERSION_MINOR);
/*
 * NOTE(review): extraction gap — the ret declaration, the error
 * return, the start++ increment and the final return are missing.
 * Comments only.
 */
1078 * t3_flash_erase_sectors - erase a range of flash sectors
1079 * @adapter: the adapter
1080 * @start: the first sector to erase
1081 * @end: the last sector to erase
1083 * Erases the sectors in the given range.
1085 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1087 while (start <= end) {
/* per sector: write-enable, issue erase with sector index in bits 8+, then wait */
1090 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1091 (ret = sf1_write(adapter, 4, 0,
1092 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1093 (ret = flash_wait_op(adapter, 5, 500)) != 0)
/*
 * NOTE(review): extraction gap — error returns for the size checks,
 * the erase-failure branch, the addr advance in the write loop and
 * the final return are missing. Comments only.
 */
1101 * t3_load_fw - download firmware
1102 * @adapter: the adapter
1103 * @fw_data: the firmware image to write
1106 * Write the supplied firmware image to the card's serial flash.
1107 * The FW image has the following sections: @size - 8 bytes of code and
1108 * data, followed by 4 bytes of FW version, followed by the 32-bit
1109 * 1's complement checksum of the whole image.
1111 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1115 const u32 *p = (const u32 *)fw_data;
1116 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
/* image must be word-aligned and hold at least version + checksum */
1118 if ((size & 3) || size < FW_MIN_SIZE)
1120 if (size - 8 > FW_MAX_SIZE)
/* 1's-complement checksum over the whole image must be all-ones */
1123 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1124 csum += ntohl(p[i]);
1125 if (csum != 0xffffffff) {
1126 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1131 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1135 size -= 8; /* trim off version and checksum */
/* program in page-sized (256-byte) chunks */
1136 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1137 unsigned int chunk_size = min(size, 256U);
1139 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1144 fw_data += chunk_size;
/* finally write the 4-byte version word at its fixed address */
1148 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
1151 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
/*
 * NOTE(review): extraction gap — declarations, the error returns for
 * each sanity-check branch, the erase-failure branch, the size
 * decrement/addr advance in the loop and the final return are
 * missing. Comments only.
 */
1156 * t3_load_boot - download boot flash
1157 * @adapter: the adapter
1158 * @boot_data: the boot image to write
1161 * Write the supplied boot image to the card's serial flash.
1162 * The boot image has the following sections: a 28-byte header and the
1165 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1167 boot_header_t *header = (boot_header_t *)boot_data;
1170 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1171 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1174 * Perform some primitive sanity testing to avoid accidentally
1175 * writing garbage over the boot sectors. We ought to check for
1176 * more but it's not worth it for now ...
1178 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1179 CH_ERR(adapter, "boot image too small/large\n");
/* the BIOS ROM signature is stored little-endian in the header */
1182 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1183 CH_ERR(adapter, "boot image missing signature\n");
/* header->length is in 512-byte units and must match the buffer size */
1186 if (header->length * BOOT_SIZE_INC != size) {
1187 CH_ERR(adapter, "boot image header length != image length\n");
1191 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1195 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1196 unsigned int chunk_size = min(size, 256U);
1198 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1203 boot_data += chunk_size;
1209 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1213 #define CIM_CTL_BASE 0x2000
/*
 * NOTE(review): extraction gap — the busy-return value, remaining
 * t3_wait_op_done arguments and the final return are missing
 * (line numbers jump 1234 -> 1237). Comments only.
 */
1216 * t3_cim_ctl_blk_read - read a block from CIM control region
1217 * @adap: the adapter
1218 * @addr: the start address within the CIM control region
1219 * @n: number of words to read
1220 * @valp: where to store the result
1222 * Reads a block of 4-byte words from the CIM control region.
1224 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
/* bail out if a host access is already in flight */
1229 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1232 for ( ; !ret && n--; addr += 4) {
1233 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1234 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1237 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
/*
 * NOTE(review): extraction gap — the else keyword between the two fc
 * assignments, the XAUI serdes reset inside the link-down branch, and
 * the mac enable/disable calls are missing. Comments only.
 */
1243 * t3_link_changed - handle interface link changes
1244 * @adapter: the adapter
1245 * @port_id: the port index that changed link state
1247 * Called when a port's link settings change to propagate the new values
1248 * to the associated PHY and MAC. After performing the common tasks it
1249 * invokes an OS-specific handler.
1251 void t3_link_changed(adapter_t *adapter, int port_id)
1253 int link_ok, speed, duplex, fc;
1254 struct port_info *pi = adap2pinfo(adapter, port_id);
1255 struct cphy *phy = &pi->phy;
1256 struct cmac *mac = &pi->mac;
1257 struct link_config *lc = &pi->link_config;
1259 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
/* autoneg pause: keep only what we requested; otherwise force requested RX/TX pause */
1261 if (lc->requested_fc & PAUSE_AUTONEG)
1262 fc &= lc->requested_fc;
1264 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1266 if (link_ok == lc->link_ok && speed == lc->speed &&
1267 duplex == lc->duplex && fc == lc->fc)
1268 return; /* nothing changed */
/* XAUI on rev > 0: gate the XGMAC transmit/receive activity with link state */
1270 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1271 uses_xaui(adapter)) {
1274 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1275 link_ok ? F_TXACTENABLE | F_RXEN : 0);
/* cache the new link state in the port's link_config */
1277 lc->link_ok = (unsigned char)link_ok;
1278 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1279 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1281 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1282 /* Set MAC speed, duplex, and flow control to match PHY. */
1283 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1284 lc->fc = (unsigned char)fc;
/* hand off to the OS-specific notification path */
1287 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1291 * t3_link_start - apply link configuration to MAC/PHY
1292 * @phy: the PHY to setup
1293 * @mac: the MAC to setup
1294 * @lc: the requested link configuration
1296 * Set up a port's MAC and PHY according to a desired link configuration.
1297 * - If the PHY can auto-negotiate first decide what to advertise, then
1298 * enable/disable auto-negotiation as desired, and reset.
1299 * - If the PHY does not auto-negotiate just reset it.
1300 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1301 * otherwise do it later based on the outcome of auto-negotiation.
1303 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1305 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1308 if (lc->supported & SUPPORTED_Autoneg) {
1309 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1311 lc->advertising |= ADVERTISED_Asym_Pause;
1313 lc->advertising |= ADVERTISED_Pause;
1315 phy->ops->advertise(phy, lc->advertising);
1317 if (lc->autoneg == AUTONEG_DISABLE) {
1318 lc->speed = lc->requested_speed;
1319 lc->duplex = lc->requested_duplex;
1320 lc->fc = (unsigned char)fc;
1321 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1323 /* Also disables autoneg */
1324 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1326 phy->ops->autoneg_enable(phy);
1328 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1329 lc->fc = (unsigned char)fc;
1330 phy->ops->reset(phy, 0);
1336 * t3_set_vlan_accel - control HW VLAN extraction
1337 * @adapter: the adapter
1338 * @ports: bitmap of adapter ports to operate on
1339 * @on: enable (1) or disable (0) HW VLAN extraction
1341 * Enables or disables HW extraction of VLAN tags for the given port.
1343 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1345 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1346 ports << S_VLANEXTRACTIONENABLE,
1347 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One row of a table-driven interrupt-cause description. */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1358 * t3_handle_intr_status - table driven interrupt handler
1359 * @adapter: the adapter that generated the interrupt
1360 * @reg: the interrupt status register to process
1361 * @mask: a mask to apply to the interrupt status
1362 * @acts: table of interrupt actions
1363 * @stats: statistics counters tracking interrupt occurences
1365 * A table driven interrupt handler that applies a set of masks to an
1366 * interrupt status word and performs the corresponding actions if the
1367 * interrupts described by the mask have occured. The actions include
1368 * optionally printing a warning or alert message, and optionally
1369 * incrementing a stat counter. The table is terminated by an entry
1370 * specifying mask 0. Returns the number of fatal interrupt conditions.
1372 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1374 const struct intr_info *acts,
1375 unsigned long *stats)
1378 unsigned int status = t3_read_reg(adapter, reg) & mask;
1380 for ( ; acts->mask; ++acts) {
1381 if (!(status & acts->mask)) continue;
1384 CH_ALERT(adapter, "%s (0x%x)\n",
1385 acts->msg, status & acts->mask);
1386 } else if (acts->msg)
1387 CH_WARN(adapter, "%s (0x%x)\n",
1388 acts->msg, status & acts->mask);
1389 if (acts->stat_idx >= 0)
1390 stats[acts->stat_idx]++;
1392 if (status) /* clear processed interrupts */
1393 t3_write_reg(adapter, reg, status);
1397 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1398 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1399 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1400 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1401 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1402 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1404 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1405 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1407 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1408 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1409 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1410 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1411 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1412 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1413 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1414 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1415 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1416 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1417 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1418 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1419 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1420 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1421 F_TXPARERR | V_BISTERR(M_BISTERR))
1422 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1423 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1424 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1425 #define ULPTX_INTR_MASK 0xfc
1426 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1427 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1428 F_ZERO_SWITCH_ERROR)
1429 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1430 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1431 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1432 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1433 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1434 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1435 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1436 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1437 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1438 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1439 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1440 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1441 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1442 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1443 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1444 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1445 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1446 V_MCAPARERRENB(M_MCAPARERRENB))
1447 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1448 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1449 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1450 F_MPS0 | F_CPL_SWITCH)
1453 * Interrupt handler for the PCIX1 module.
1455 static void pci_intr_handler(adapter_t *adapter)
1457 static struct intr_info pcix1_intr_info[] = {
1458 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1459 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1460 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1461 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1462 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1463 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1464 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1465 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1466 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1468 { F_DETCORECCERR, "PCI correctable ECC error",
1469 STAT_PCI_CORR_ECC, 0 },
1470 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1471 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1472 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1474 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1476 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1478 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1483 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1484 pcix1_intr_info, adapter->irq_stats))
1485 t3_fatal_err(adapter);
1489 * Interrupt handler for the PCIE module.
1491 static void pcie_intr_handler(adapter_t *adapter)
1493 static struct intr_info pcie_intr_info[] = {
1494 { F_PEXERR, "PCI PEX error", -1, 1 },
1496 "PCI unexpected split completion DMA read error", -1, 1 },
1498 "PCI unexpected split completion DMA command error", -1, 1 },
1499 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1500 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1501 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1502 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1503 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1504 "PCI MSI-X table/PBA parity error", -1, 1 },
1505 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1506 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1507 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1508 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1509 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1513 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1514 CH_ALERT(adapter, "PEX error code 0x%x\n",
1515 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1517 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1518 pcie_intr_info, adapter->irq_stats))
1519 t3_fatal_err(adapter);
1523 * TP interrupt handler.
1525 static void tp_intr_handler(adapter_t *adapter)
1527 static struct intr_info tp_intr_info[] = {
1528 { 0xffffff, "TP parity error", -1, 1 },
1529 { 0x1000000, "TP out of Rx pages", -1, 1 },
1530 { 0x2000000, "TP out of Tx pages", -1, 1 },
1533 static struct intr_info tp_intr_info_t3c[] = {
1534 { 0x1fffffff, "TP parity error", -1, 1 },
1535 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1536 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1540 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1541 adapter->params.rev < T3_REV_C ?
1542 tp_intr_info : tp_intr_info_t3c, NULL))
1543 t3_fatal_err(adapter);
1547 * CIM interrupt handler.
1549 static void cim_intr_handler(adapter_t *adapter)
1551 static struct intr_info cim_intr_info[] = {
1552 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1553 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1554 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1555 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1556 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1557 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1558 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1559 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1560 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1561 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1562 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1563 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1564 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1565 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1566 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1567 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1568 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1569 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1570 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1571 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1572 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1573 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1574 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1575 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1579 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1580 cim_intr_info, NULL))
1581 t3_fatal_err(adapter);
1585 * ULP RX interrupt handler.
1587 static void ulprx_intr_handler(adapter_t *adapter)
1589 static struct intr_info ulprx_intr_info[] = {
1590 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1591 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1592 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1593 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1594 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1595 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1596 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1597 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1601 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1602 ulprx_intr_info, NULL))
1603 t3_fatal_err(adapter);
1607 * ULP TX interrupt handler.
1609 static void ulptx_intr_handler(adapter_t *adapter)
1611 static struct intr_info ulptx_intr_info[] = {
1612 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1613 STAT_ULP_CH0_PBL_OOB, 0 },
1614 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1615 STAT_ULP_CH1_PBL_OOB, 0 },
1616 { 0xfc, "ULP TX parity error", -1, 1 },
1620 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1621 ulptx_intr_info, adapter->irq_stats))
1622 t3_fatal_err(adapter);
1625 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1626 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1627 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1628 F_ICSPI1_TX_FRAMING_ERROR)
1629 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1630 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1631 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1632 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1635 * PM TX interrupt handler.
1637 static void pmtx_intr_handler(adapter_t *adapter)
1639 static struct intr_info pmtx_intr_info[] = {
1640 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1641 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1642 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1643 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1644 "PMTX ispi parity error", -1, 1 },
1645 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1646 "PMTX ospi parity error", -1, 1 },
1650 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1651 pmtx_intr_info, NULL))
1652 t3_fatal_err(adapter);
1655 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1656 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1657 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1658 F_IESPI1_TX_FRAMING_ERROR)
1659 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1660 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1661 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1662 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1665 * PM RX interrupt handler.
1667 static void pmrx_intr_handler(adapter_t *adapter)
1669 static struct intr_info pmrx_intr_info[] = {
1670 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1671 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1672 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1673 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1674 "PMRX ispi parity error", -1, 1 },
1675 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1676 "PMRX ospi parity error", -1, 1 },
1680 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1681 pmrx_intr_info, NULL))
1682 t3_fatal_err(adapter);
1686 * CPL switch interrupt handler.
1688 static void cplsw_intr_handler(adapter_t *adapter)
1690 static struct intr_info cplsw_intr_info[] = {
1691 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1692 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1693 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1694 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1695 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1696 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1700 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1701 cplsw_intr_info, NULL))
1702 t3_fatal_err(adapter);
1706 * MPS interrupt handler.
1708 static void mps_intr_handler(adapter_t *adapter)
1710 static struct intr_info mps_intr_info[] = {
1711 { 0x1ff, "MPS parity error", -1, 1 },
1715 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1716 mps_intr_info, NULL))
1717 t3_fatal_err(adapter);
1720 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1723 * MC7 interrupt handler.
1725 static void mc7_intr_handler(struct mc7 *mc7)
1727 adapter_t *adapter = mc7->adapter;
1728 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1731 mc7->stats.corr_err++;
1732 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1733 "data 0x%x 0x%x 0x%x\n", mc7->name,
1734 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1735 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1736 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1737 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1741 mc7->stats.uncorr_err++;
1742 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1743 "data 0x%x 0x%x 0x%x\n", mc7->name,
1744 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1745 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1746 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1747 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1751 mc7->stats.parity_err++;
1752 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1753 mc7->name, G_PE(cause));
1759 if (adapter->params.rev > 0)
1760 addr = t3_read_reg(adapter,
1761 mc7->offset + A_MC7_ERR_ADDR);
1762 mc7->stats.addr_err++;
1763 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1767 if (cause & MC7_INTR_FATAL)
1768 t3_fatal_err(adapter);
1770 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1773 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1774 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1776 * XGMAC interrupt handler.
1778 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1783 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1784 mac = &adap2pinfo(adap, idx)->mac;
1785 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1787 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1788 mac->stats.tx_fifo_parity_err++;
1789 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1791 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1792 mac->stats.rx_fifo_parity_err++;
1793 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1795 if (cause & F_TXFIFO_UNDERRUN)
1796 mac->stats.tx_fifo_urun++;
1797 if (cause & F_RXFIFO_OVERFLOW)
1798 mac->stats.rx_fifo_ovfl++;
1799 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1800 mac->stats.serdes_signal_loss++;
1801 if (cause & F_XAUIPCSCTCERR)
1802 mac->stats.xaui_pcs_ctc_err++;
1803 if (cause & F_XAUIPCSALIGNCHANGE)
1804 mac->stats.xaui_pcs_align_change++;
1806 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1807 if (cause & XGM_INTR_FATAL)
1813 * Interrupt handler for PHY events.
1815 int t3_phy_intr_handler(adapter_t *adapter)
1817 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1819 for_each_port(adapter, i) {
1820 struct port_info *p = adap2pinfo(adapter, i);
1822 if (!(p->phy.caps & SUPPORTED_IRQ))
1825 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1826 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1828 if (phy_cause & cphy_cause_link_change)
1829 t3_link_changed(adapter, i);
1830 if (phy_cause & cphy_cause_fifo_error)
1831 p->phy.fifo_errors++;
1832 if (phy_cause & cphy_cause_module_change)
1833 t3_os_phymod_changed(adapter, i);
1837 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1842 * t3_slow_intr_handler - control path interrupt handler
1843 * @adapter: the adapter
1845 * T3 interrupt handler for non-data interrupt events, e.g., errors.
1846 * The designation 'slow' is because it involves register reads, while
1847 * data interrupts typically don't involve any MMIOs.
1849 int t3_slow_intr_handler(adapter_t *adapter)
1851 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1853 cause &= adapter->slow_intr_mask;
1856 if (cause & F_PCIM0) {
1857 if (is_pcie(adapter))
1858 pcie_intr_handler(adapter);
1860 pci_intr_handler(adapter);
1863 t3_sge_err_intr_handler(adapter);
1864 if (cause & F_MC7_PMRX)
1865 mc7_intr_handler(&adapter->pmrx);
1866 if (cause & F_MC7_PMTX)
1867 mc7_intr_handler(&adapter->pmtx);
1868 if (cause & F_MC7_CM)
1869 mc7_intr_handler(&adapter->cm);
1871 cim_intr_handler(adapter);
1873 tp_intr_handler(adapter);
1874 if (cause & F_ULP2_RX)
1875 ulprx_intr_handler(adapter);
1876 if (cause & F_ULP2_TX)
1877 ulptx_intr_handler(adapter);
1878 if (cause & F_PM1_RX)
1879 pmrx_intr_handler(adapter);
1880 if (cause & F_PM1_TX)
1881 pmtx_intr_handler(adapter);
1882 if (cause & F_CPL_SWITCH)
1883 cplsw_intr_handler(adapter);
1885 mps_intr_handler(adapter);
1887 t3_mc5_intr_handler(&adapter->mc5);
1888 if (cause & F_XGMAC0_0)
1889 mac_intr_handler(adapter, 0);
1890 if (cause & F_XGMAC0_1)
1891 mac_intr_handler(adapter, 1);
1892 if (cause & F_T3DBG)
1893 t3_os_ext_intr_handler(adapter);
1895 /* Clear the interrupts just processed. */
1896 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1897 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1901 static unsigned int calc_gpio_intr(adapter_t *adap)
1903 unsigned int i, gpi_intr = 0;
1905 for_each_port(adap, i)
1906 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1907 adapter_info(adap)->gpio_intr[i])
1908 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1913 * t3_intr_enable - enable interrupts
1914 * @adapter: the adapter whose interrupts should be enabled
1916 * Enable interrupts by setting the interrupt enable registers of the
1917 * various HW modules and then enabling the top-level interrupt
1920 void t3_intr_enable(adapter_t *adapter)
1922 static struct addr_val_pair intr_en_avp[] = {
1923 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1924 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1926 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1928 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1929 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1930 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1931 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1932 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1933 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1936 adapter->slow_intr_mask = PL_INTR_MASK;
1938 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1939 t3_write_reg(adapter, A_TP_INT_ENABLE,
1940 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1941 t3_write_reg(adapter, A_SG_INT_ENABLE,
1942 adapter->params.rev >= T3_REV_C ?
1943 SGE_INTR_MASK | F_FLEMPTY : SGE_INTR_MASK);
1945 if (adapter->params.rev > 0) {
1946 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1947 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1948 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1949 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1950 F_PBL_BOUND_ERR_CH1);
1952 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1953 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1956 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1958 if (is_pcie(adapter))
1959 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1961 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1962 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1963 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1967 * t3_intr_disable - disable a card's interrupts
1968 * @adapter: the adapter whose interrupts should be disabled
1970 * Disable interrupts. We only disable the top-level interrupt
1971 * concentrator and the SGE data interrupts.
1973 void t3_intr_disable(adapter_t *adapter)
1975 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1976 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1977 adapter->slow_intr_mask = 0;
1981 * t3_intr_clear - clear all interrupts
1982 * @adapter: the adapter whose interrupts should be cleared
1984 * Clears all interrupts.
1986 void t3_intr_clear(adapter_t *adapter)
1988 static const unsigned int cause_reg_addr[] = {
1990 A_SG_RSPQ_FL_STATUS,
1993 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1994 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1995 A_CIM_HOST_INT_CAUSE,
2008 /* Clear PHY and MAC interrupts for each port. */
2009 for_each_port(adapter, i)
2010 t3_port_intr_clear(adapter, i);
2012 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2013 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2015 if (is_pcie(adapter))
2016 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2017 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2018 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2022 * t3_port_intr_enable - enable port-specific interrupts
2023 * @adapter: associated adapter
2024 * @idx: index of port whose interrupts should be enabled
2026 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2029 void t3_port_intr_enable(adapter_t *adapter, int idx)
2031 struct port_info *pi = adap2pinfo(adapter, idx);
2033 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2034 pi->phy.ops->intr_enable(&pi->phy);
2038 * t3_port_intr_disable - disable port-specific interrupts
2039 * @adapter: associated adapter
2040 * @idx: index of port whose interrupts should be disabled
2042 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2045 void t3_port_intr_disable(adapter_t *adapter, int idx)
2047 struct port_info *pi = adap2pinfo(adapter, idx);
2049 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2050 pi->phy.ops->intr_disable(&pi->phy);
2054 * t3_port_intr_clear - clear port-specific interrupts
2055 * @adapter: associated adapter
2056 * @idx: index of port whose interrupts to clear
2058 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2061 void t3_port_intr_clear(adapter_t *adapter, int idx)
2063 struct port_info *pi = adap2pinfo(adapter, idx);
2065 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2066 pi->phy.ops->intr_clear(&pi->phy);
2069 #define SG_CONTEXT_CMD_ATTEMPTS 100
2072 * t3_sge_write_context - write an SGE context
2073 * @adapter: the adapter
2074 * @id: the context id
2075 * @type: the context type
2077 * Program an SGE context with the values already loaded in the
2078 * CONTEXT_DATA? registers.
2080 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2083 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2084 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2085 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2086 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2087 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2088 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2089 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2090 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2093 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2095 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2096 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2097 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2098 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2099 return t3_sge_write_context(adap, id, type);
2103 * t3_sge_init_ecntxt - initialize an SGE egress context
2104 * @adapter: the adapter to configure
2105 * @id: the context id
2106 * @gts_enable: whether to enable GTS for the context
2107 * @type: the egress context type
2108 * @respq: associated response queue
2109 * @base_addr: base address of queue
2110 * @size: number of queue entries
2112 * @gen: initial generation value for the context
2113 * @cidx: consumer pointer
2115 * Initialize an SGE egress context and make it ready for use. If the
2116 * platform allows concurrent context operations, the caller is
2117 * responsible for appropriate locking.
2119 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2120 enum sge_context_type type, int respq, u64 base_addr,
2121 unsigned int size, unsigned int token, int gen,
2124 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2126 if (base_addr & 0xfff) /* must be 4K aligned */
2128 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2132 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2133 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2134 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2135 V_EC_BASE_LO((u32)base_addr & 0xffff));
2137 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2139 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2140 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2141 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2143 return t3_sge_write_context(adapter, id, F_EGRESS);
2147 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2148 * @adapter: the adapter to configure
2149 * @id: the context id
2150 * @gts_enable: whether to enable GTS for the context
2151 * @base_addr: base address of queue
2152 * @size: number of queue entries
2153 * @bsize: size of each buffer for this queue
2154 * @cong_thres: threshold to signal congestion to upstream producers
2155 * @gen: initial generation value for the context
2156 * @cidx: consumer pointer
2158 * Initialize an SGE free list context and make it ready for use. The
2159 * caller is responsible for ensuring only one context operation occurs
2162 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2163 u64 base_addr, unsigned int size, unsigned int bsize,
2164 unsigned int cong_thres, int gen, unsigned int cidx)
2166 if (base_addr & 0xfff) /* must be 4K aligned */
2168 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2172 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2174 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2175 V_FL_BASE_HI((u32)base_addr) |
2176 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2177 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2178 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2179 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2180 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2181 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2182 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2183 return t3_sge_write_context(adapter, id, F_FREELIST);
2187 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2188 * @adapter: the adapter to configure
2189 * @id: the context id
2190 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2191 * @base_addr: base address of queue
2192 * @size: number of queue entries
2193 * @fl_thres: threshold for selecting the normal or jumbo free list
2194 * @gen: initial generation value for the context
2195 * @cidx: consumer pointer
2197 * Initialize an SGE response queue context and make it ready for use.
2198 * The caller is responsible for ensuring only one context operation
2201 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2202 u64 base_addr, unsigned int size,
2203 unsigned int fl_thres, int gen, unsigned int cidx)
2205 unsigned int intr = 0;
2207 if (base_addr & 0xfff) /* must be 4K aligned */
2209 if (t3_read_reg(adapter, A_SG_CONTEXT_DATA0) & F_CONTEXT_CMD_BUSY)
2213 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2215 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
/* A negative irq_vec_idx leaves interrupt delivery disabled for this queue. */
2217 if (irq_vec_idx >= 0)
2218 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2219 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2220 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2221 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2222 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2226 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2227 * @adapter: the adapter to configure
2228 * @id: the context id
2229 * @base_addr: base address of queue
2230 * @size: number of queue entries
2231 * @rspq: response queue for async notifications
2232 * @ovfl_mode: CQ overflow mode
2233 * @credits: completion queue credits
2234 * @credit_thres: the credit threshold
2236 * Initialize an SGE completion queue context and make it ready for use.
2237 * The caller is responsible for ensuring only one context operation
2240 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2241 unsigned int size, int rspq, int ovfl_mode,
2242 unsigned int credits, unsigned int credit_thres)
2244 if (base_addr & 0xfff) /* must be 4K aligned */
2246 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2250 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2251 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
/* Generation starts at 1; ovfl_mode also seeds the CQ error bit. */
2253 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2254 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2255 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2256 V_CQ_ERR(ovfl_mode));
2257 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2258 V_CQ_CREDIT_THRES(credit_thres));
2259 return t3_sge_write_context(adapter, id, F_CQ);
2263 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2264 * @adapter: the adapter
2265 * @id: the egress context id
2266 * @enable: enable (1) or disable (0) the context
2268 * Enable or disable an SGE egress context. The caller is responsible for
2269 * ensuring only one context operation occurs at a time.
2271 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2273 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masks select only the EC_VALID bit, so this partial context write
 * flips just the valid flag and leaves the rest of the context intact. */
2276 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2277 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2278 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2279 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2280 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2281 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2282 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2283 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2284 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2288 * t3_sge_disable_fl - disable an SGE free-buffer list
2289 * @adapter: the adapter
2290 * @id: the free list context id
2292 * Disable an SGE free-buffer list. The caller is responsible for
2293 * ensuring only one context operation occurs at a time.
2295 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2297 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Partial write of only the FL_SIZE field: setting the size to 0
 * effectively disables the free list without touching other fields. */
2300 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2301 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2302 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2303 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2304 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2305 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2306 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2307 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2308 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2312 * t3_sge_disable_rspcntxt - disable an SGE response queue
2313 * @adapter: the adapter
2314 * @id: the response queue context id
2316 * Disable an SGE response queue. The caller is responsible for
2317 * ensuring only one context operation occurs at a time.
2319 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2321 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Partial write of only the CQ_SIZE field to 0 disables the queue. */
2324 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2325 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2326 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2327 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2328 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2329 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2330 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2331 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2332 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2336 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2337 * @adapter: the adapter
2338 * @id: the completion queue context id
2340 * Disable an SGE completion queue. The caller is responsible for
2341 * ensuring only one context operation occurs at a time.
2343 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2345 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Same pattern as the response-queue disable: zero only the size field. */
2348 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2349 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2350 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2351 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2352 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2353 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2354 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2355 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2356 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2360 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2361 * @adapter: the adapter
2362 * @id: the context id
2363 * @op: the operation to perform
2364 * @credits: credits to return to the CQ
2366 * Perform the selected operation on an SGE completion queue context.
2367 * The caller is responsible for ensuring only one context operation
2370 * For most operations the function returns the current HW position in
2371 * the completion queue.
2373 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2374 unsigned int credits)
2378 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Returned credits are passed in the upper 16 bits of DATA0. */
2381 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16)
2382 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2383 V_CONTEXT(id) | F_CQ);
2384 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2385 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* Opcodes 2..6 report the current HW CQ index.  Rev-0 silicon does not
 * return it with the command, so issue an explicit context read (opcode 0)
 * and extract the index from DATA0. */
2388 if (op >= 2 && op < 7) {
2389 if (adapter->params.rev > 0)
2390 return G_CQ_INDEX(val);
2392 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2393 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2394 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2395 F_CONTEXT_CMD_BUSY, 0,
2396 SG_CONTEXT_CMD_ATTEMPTS, 1))
2398 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2404 * t3_sge_read_context - read an SGE context
2405 * @type: the context type
2406 * @adapter: the adapter
2407 * @id: the context id
2408 * @data: holds the retrieved context
2410 * Read an SGE context of the given type. The caller is responsible for ensuring
2411 * only one context operation occurs at a time.
2413 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2414 unsigned int id, u32 data[4])
2416 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 is a context read; on completion the context image is
 * available in the four SG_CONTEXT_DATA registers. */
2419 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2420 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2421 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2422 SG_CONTEXT_CMD_ATTEMPTS, 1))
2424 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2425 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2426 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2427 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2432 * t3_sge_read_ecntxt - read an SGE egress context
2433 * @adapter: the adapter
2434 * @id: the context id
2435 * @data: holds the retrieved context
2437 * Read an SGE egress context. The caller is responsible for ensuring
2438 * only one context operation occurs at a time.
2440 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
/* Thin wrapper: read context 'id' as an egress context. */
2444 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2448 * t3_sge_read_cq - read an SGE CQ context
2449 * @adapter: the adapter
2450 * @id: the context id
2451 * @data: holds the retrieved context
2453 * Read an SGE CQ context. The caller is responsible for ensuring
2454 * only one context operation occurs at a time.
2456 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
/* Thin wrapper: read context 'id' as a completion-queue context. */
2460 return t3_sge_read_context(F_CQ, adapter, id, data);
2464 * t3_sge_read_fl - read an SGE free-list context
2465 * @adapter: the adapter
2466 * @id: the context id
2467 * @data: holds the retrieved context
2469 * Read an SGE free-list context. The caller is responsible for ensuring
2470 * only one context operation occurs at a time.
2472 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2474 if (id >= SGE_QSETS * 2)
2476 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2480 * t3_sge_read_rspq - read an SGE response queue context
2481 * @adapter: the adapter
2482 * @id: the context id
2483 * @data: holds the retrieved context
2485 * Read an SGE response queue context. The caller is responsible for
2486 * ensuring only one context operation occurs at a time.
2488 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2490 if (id >= SGE_QSETS)
2492 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2496 * t3_config_rss - configure Rx packet steering
2497 * @adapter: the adapter
2498 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2499 * @cpus: values for the CPU lookup table (0xff terminated)
2500 * @rspq: values for the response queue lookup table (0xffff terminated)
2502 * Programs the receive packet steering logic. @cpus and @rspq provide
2503 * the values for the CPU and response queue lookup tables. If they
2504 * provide fewer values than the size of the tables the supplied values
2505 * are used repeatedly until the tables are fully populated.
2507 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2510 int i, j, cpu_idx = 0, q_idx = 0;
/* Fill the CPU lookup table; two 6-bit CPU values are packed per entry.
 * A 0xff sentinel in @cpus wraps the index so the supplied values repeat
 * until the whole table is populated. */
2513 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2516 for (j = 0; j < 2; ++j) {
2517 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2518 if (cpus[cpu_idx] == 0xff)
2521 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Fill the response-queue map table; 0xffff in @rspq is the wrap sentinel. */
2525 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2526 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2527 (i << 16) | rspq[q_idx++]);
2528 if (rspq[q_idx] == 0xffff)
2532 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2536 * t3_read_rss - read the contents of the RSS tables
2537 * @adapter: the adapter
2538 * @lkup: holds the contents of the RSS lookup table
2539 * @map: holds the contents of the RSS map table
2541 * Reads the contents of the receive packet steering tables.
2543 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
/* Read back both RSS tables entry by entry: writing the index triggers a
 * read-back through the same register. */
2549 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2550 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2552 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
/* Bit 31 presumably flags the read-back data as valid — TODO confirm. */
2553 if (!(val & 0x80000000))
2556 *lkup++ = (u8)(val >> 8);
2560 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2561 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2563 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2564 if (!(val & 0x80000000))
2572 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2573 * @adap: the adapter
2574 * @enable: 1 to select offload mode, 0 for regular NIC
2576 * Switches TP to NIC/offload mode.
2578 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
/* NICMODE is the inverse of offload mode: clear it to enable offload.
 * Offload mode is only entered on offload-capable adapters. */
2580 if (is_offload(adap) || !enable)
2581 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2582 V_NICMODE(!enable));
2586 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2587 * @adap: the adapter
2588 * @addr: the indirect TP register address
2589 * @mask: specifies the field within the register to modify
2590 * @val: new value for the field
2592 * Sets a field of an indirect TP register to the given value.
2594 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2595 unsigned int mask, unsigned int val)
/* Read-modify-write through the TP PIO address/data register pair. */
2597 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2598 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2599 t3_write_reg(adap, A_TP_PIO_DATA, val);
2603 * t3_enable_filters - enable the HW filters
2604 * @adap: the adapter
2606 * Enables the HW filters for NIC traffic.
2608 void t3_enable_filters(adapter_t *adap)
/* Leave NIC mode, turn on MC5 filtering, and enable 5-tuple lookups on
 * every packet so the HW filters see all traffic. */
2610 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
2611 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
2612 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
2613 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
2617 * pm_num_pages - calculate the number of pages of the payload memory
2618 * @mem_size: the size of the payload memory
2619 * @pg_size: the size of each payload memory page
2621 * Calculate the number of pages, each of the given size, that fit in a
2622 * memory of the specified size, respecting the HW requirement that the
2623 * number of pages must be a multiple of 24.
2625 static inline unsigned int pm_num_pages(unsigned int mem_size,
2626 unsigned int pg_size)
/* Raw page count; the multiple-of-24 rounding happens below (elided here). */
2628 unsigned int n = mem_size / pg_size;
/* Program a CM memory-region base register with 'start'; presumably also
 * advances 'start' by 'size' in the elided continuation — TODO confirm. */
2633 #define mem_region(adap, start, size, reg) \
2634 t3_write_reg((adap), A_ ## reg, (start)); \
2638 * partition_mem - partition memory and configure TP memory settings
2639 * @adap: the adapter
2640 * @p: the TP parameters
2642 * Partitions context and payload memory and configures TP's memory
2645 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2647 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2648 unsigned int timers = 0, timers_shift = 22;
/* Scale the connection-timer region with the number of TIDs on rev > 0
 * parts (the selected timers/timers_shift values are elided here). */
2650 if (adap->params.rev > 0) {
2651 if (tids <= 16 * 1024) {
2654 } else if (tids <= 64 * 1024) {
2657 } else if (tids <= 256 * 1024) {
/* Per-channel payload-memory sizes: Rx in the low half, Tx in the high. */
2663 t3_write_reg(adap, A_TP_PMM_SIZE,
2664 p->chan_rx_size | (p->chan_tx_size >> 16));
2666 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2667 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2668 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
/* fls(pg_size) - 12 encodes the page size as a power of two above 4K. */
2669 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2670 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2672 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2673 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2674 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2676 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2677 /* Add a bit of headroom and make multiple of 24 */
2679 pstructs -= pstructs % 24;
2680 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out context memory: TCBs first, then egress/CQ context regions,
 * the connection timers, and the pstruct/free-list regions.  'm' tracks
 * the running offset (mem_region advances it). */
2682 m = tids * TCB_SIZE;
2683 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2684 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2685 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2686 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2687 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2688 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2689 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2690 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4K boundary and give the remainder of CM to the CIM. */
2692 m = (m + 4095) & ~0xfff;
2693 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2694 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Recompute how many TIDs actually fit and absorb the difference into
 * the server region of the MC5 TCAM. */
2696 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2697 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2698 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2700 adap->params.mc5.nservers += m - tids;
/* Write an indirect TP register through the PIO address/data pair. */
2703 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2705 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2706 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (transport processor) configuration: checksum offloads,
 * TCP options, delayed-ACK behavior, congestion settings, pacing, and
 * extra steering setup for 4-port (nports > 2) adapters. */
2709 static void tp_config(adapter_t *adap, const struct tp_params *p)
2711 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2712 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2713 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2714 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2715 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2716 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2717 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2718 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2719 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2720 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2721 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2722 F_IPV6ENABLE | F_NICMODE);
2723 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2724 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2725 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2726 adap->params.rev > 0 ? F_ENABLEESND :
2728 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2730 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2731 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2732 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2733 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2734 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2735 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2736 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev > 0 parts get automatic Tx pacing; rev 0 uses fixed pacing. */
2738 if (adap->params.rev > 0) {
2739 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2740 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
2741 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
2742 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2743 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
2744 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
2745 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
2747 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2749 if (adap->params.rev == T3_REV_C)
2750 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2751 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2752 V_TABLELATENCYDELTA(4));
2754 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2755 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2756 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2757 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
/* 4-port adapters steer ports via packet-header bits 48..51. */
2759 if (adap->params.nports > 2) {
2760 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2761 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
2762 F_ENABLERXPORTFROMADDR);
2763 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2764 V_RXMAPMODE(M_RXMAPMODE), 0);
2765 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2766 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2767 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2768 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2769 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2770 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2771 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
2775 /* TCP timer values in ms */
2776 #define TP_DACK_TIMER 50
2777 #define TP_RTO_MIN 250
2780 * tp_set_timers - set TP timing parameters
2781 * @adap: the adapter to set
2782 * @core_clk: the core clock frequency in Hz
2784 * Set TP's timing parameters, such as the various timer resolutions and
2785 * the TCP timer values.
2787 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2789 unsigned int tre = adap->params.tp.tre;
2790 unsigned int dack_re = adap->params.tp.dack_re;
2791 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps = timer ticks per second at the configured timer resolution. */
2792 unsigned int tps = core_clk >> tre;
2794 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2795 V_DELAYEDACKRESOLUTION(dack_re) |
2796 V_TIMESTAMPRESOLUTION(tstamp_re));
2797 t3_write_reg(adap, A_TP_DACK_TIMER,
2798 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff table: each byte is a shift for one retry stage. */
2799 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2800 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2801 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2802 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2803 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2804 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2805 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* 'n SECONDS' expands to 'n * tps', converting seconds to timer ticks. */
2808 #define SECONDS * tps
2810 t3_write_reg(adap, A_TP_MSL,
2811 adap->params.rev > 0 ? 0 : 2 SECONDS);
2812 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2813 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2814 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2815 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2816 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2817 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2818 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2819 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2824 #ifdef CONFIG_CHELSIO_T3_CORE
2826 * t3_tp_set_coalescing_size - set receive coalescing size
2827 * @adap: the adapter
2828 * @size: the receive coalescing size
2829 * @psh: whether a set PSH bit should deliver coalesced data
2831 * Set the receive coalescing size and PSH bit handling.
2833 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2837 if (size > MAX_RX_COALESCING_LEN)
/* Clear both coalescing control bits, then re-enable as requested. */
2840 val = t3_read_reg(adap, A_TP_PARA_REG3);
2841 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2844 val |= F_RXCOALESCEENABLE;
2846 val |= F_RXCOALESCEPSHEN;
2847 size = min(MAX_RX_COALESCING_LEN, size);
2848 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2849 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2851 t3_write_reg(adap, A_TP_PARA_REG3, val);
2856 * t3_tp_set_max_rxsize - set the max receive size
2857 * @adap: the adapter
2858 * @size: the max receive size
2860 * Set TP's max receive size. This is the limit that applies when
2861 * receive coalescing is disabled.
2863 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
/* Apply the same limit to both payload-memory transfer-length fields. */
2865 t3_write_reg(adap, A_TP_PARA_REG7,
2866 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2869 static void __devinit init_mtus(unsigned short mtus[])
2872 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2873 * it can accommodate max size TCP/IP headers when SACK and timestamps
2874 * are enabled and still have at least 8 bytes of payload.
2895 * init_cong_ctrl - initialize congestion control parameters
2896 * @a: the alpha values for congestion control
2897 * @b: the beta values for congestion control
2899 * Initialize the congestion control parameters.
2901 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
/* Alpha starts at 1 for the smallest windows; beta grows in steps as the
 * congestion window index increases. */
2903 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2928 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2931 b[13] = b[14] = b[15] = b[16] = 3;
2932 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2933 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2938 /* The minimum additive increment value for the congestion control table */
2939 #define CC_MIN_INCR 2U
2942 * t3_load_mtus - write the MTU and congestion control HW tables
2943 * @adap: the adapter
2944 * @mtus: the unrestricted values for the MTU table
2945 * @alpha: the values for the congestion control alpha parameter
2946 * @beta: the values for the congestion control beta parameter
2947 * @mtu_cap: the maximum permitted effective MTU
2949 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2950 * Update the high-speed congestion control table with the supplied alpha,
2953 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2954 unsigned short alpha[NCCTRL_WIN],
2955 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, indexed by window size. */
2957 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2958 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2959 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2960 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2964 for (i = 0; i < NMTUS; ++i) {
2965 unsigned int mtu = min(mtus[i], mtu_cap);
/* Encode each MTU with its rounded log2 in the table entry. */
2966 unsigned int log2 = fls(mtu);
2968 if (!(mtu & ((1 << log2) >> 2))) /* round */
2970 t3_write_reg(adap, A_TP_MTU_TABLE,
2971 (i << 24) | (log2 << 16) | mtu);
/* Additive increment per window: scale alpha by the payload size
 * (mtu - 40 bytes of TCP/IP header) over the average packet count. */
2973 for (w = 0; w < NCCTRL_WIN; ++w) {
2976 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2979 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2980 (w << 16) | (beta[w] << 13) | inc);
2986 * t3_read_hw_mtus - returns the values in the HW MTU table
2987 * @adap: the adapter
2988 * @mtus: where to store the HW MTU values
2990 * Reads the HW MTU table.
2992 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
/* Writing 0xff000000 | index selects the entry for read-back; the MTU
 * value occupies the low 14 bits. */
2996 for (i = 0; i < NMTUS; ++i) {
2999 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3000 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3001 mtus[i] = val & 0x3fff;
3006 * t3_get_cong_cntl_tab - reads the congestion control table
3007 * @adap: the adapter
3008 * @incr: where to store the alpha values
3010 * Reads the additive increments programmed into the HW congestion
3013 void t3_get_cong_cntl_tab(adapter_t *adap,
3014 unsigned short incr[NMTUS][NCCTRL_WIN])
3016 unsigned int mtu, w;
/* Select each (mtu, window) entry for read-back; the additive increment
 * occupies the low 13 bits of the register. */
3018 for (mtu = 0; mtu < NMTUS; ++mtu)
3019 for (w = 0; w < NCCTRL_WIN; ++w) {
3020 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3021 0xffff0000 | (mtu << 5) | w);
3022 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3023 A_TP_CCTRL_TABLE) & 0x1fff;
3028 * t3_tp_get_mib_stats - read TP's MIB counters
3029 * @adap: the adapter
3030 * @tps: holds the returned counter values
3032 * Returns the values of TP's MIB counters.
3034 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
/* Bulk-read the MIB counters straight into the stats struct as u32s. */
3036 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3037 sizeof(*tps) / sizeof(u32), 0);
3041 * t3_read_pace_tbl - read the pace table
3042 * @adap: the adapter
3043 * @pace_vals: holds the returned values
3045 * Returns the values of TP's pace table in nanoseconds.
3047 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
/* tick_ns: duration of one pace-table tick, in nanoseconds. */
3049 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3051 for (i = 0; i < NTX_SCHED; i++) {
3052 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3053 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3058 * t3_set_pace_tbl - set the pace table
3059 * @adap: the adapter
3060 * @pace_vals: the pace values in nanoseconds
3061 * @start: index of the first entry in the HW pace table to set
3062 * @n: how many entries to set
3064 * Sets (a subset of the) HW pace table.
3066 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3067 unsigned int start, unsigned int n)
3069 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
/* Convert each nanosecond value to ticks, rounding to nearest. */
3071 for ( ; n; n--, start++, pace_vals++)
3072 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3073 ((*pace_vals + tick_ns / 2) / tick_ns));
/* Program the ULP Rx lower/upper limit registers for a region starting at
 * 'start' with length 'len' (continuation elided in this excerpt). */
3076 #define ulp_region(adap, name, start, len) \
3077 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3078 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3079 (start) + (len) - 1); \
/* Same as ulp_region but for the ULP Tx limit registers. */
3082 #define ulptx_region(adap, name, start, len) \
3083 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3084 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3085 (start) + (len) - 1)
/* Carve the per-channel Rx payload memory into the ULP regions (iSCSI,
 * TDDP, TPT, STAG, RQ, PBL); 'm' is the running offset advanced by the
 * region macros. */
3087 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3089 unsigned int m = p->chan_rx_size;
3091 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3092 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3093 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3094 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3095 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3096 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3097 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3098 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3103 * t3_set_proto_sram - set the contents of the protocol sram
3104 * @adapter: the adapter
3105 * @data: the protocol image
3107 * Write the contents of the protocol SRAM.
3109 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3112 const u32 *buf = (const u32 *)data;
/* Each SRAM line is five 32-bit words loaded into FIELD5..FIELD1, then
 * committed by writing the line number with the command bit to FIELD0. */
3114 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3115 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3116 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3117 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3118 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3119 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
/* NOTE(review): 1 << 31 left-shifts into the sign bit of int; 1U << 31
 * would avoid the implementation-defined/UB corner. */
3121 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3122 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3130 * t3_config_trace_filter - configure one of the tracing filters
3131 * @adapter: the adapter
3132 * @tp: the desired trace filter parameters
3133 * @filter_index: which filter to configure
3134 * @invert: if set non-matching packets are traced instead of matching ones
3135 * @enable: whether to enable or disable the filter
3137 * Configures one of the tracing filters available in HW.
3139 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3140 int filter_index, int invert, int enable)
3142 u32 addr, key[4], mask[4];
/* Pack the 5-tuple (plus VLAN and interface) into four key words and
 * four matching mask words; fields straddle word boundaries. */
3144 key[0] = tp->sport | (tp->sip << 16);
3145 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3147 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3149 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3150 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3151 mask[2] = tp->dip_mask;
3152 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Bits 29/28 of key[3] presumably carry the invert/enable flags (the
 * guarding conditionals are elided in this excerpt) — TODO confirm. */
3155 key[3] |= (1 << 29);
3157 key[3] |= (1 << 28);
/* Keys and masks are interleaved in consecutive indirect registers. */
3159 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3160 tp_wr_indirect(adapter, addr++, key[0]);
3161 tp_wr_indirect(adapter, addr++, mask[0]);
3162 tp_wr_indirect(adapter, addr++, key[1]);
3163 tp_wr_indirect(adapter, addr++, mask[1]);
3164 tp_wr_indirect(adapter, addr++, key[2]);
3165 tp_wr_indirect(adapter, addr++, mask[2]);
3166 tp_wr_indirect(adapter, addr++, key[3]);
3167 tp_wr_indirect(adapter, addr, mask[3]);
/* Flush the final write (read forces the posted write to complete). */
3168 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3172 * t3_config_sched - configure a HW traffic scheduler
3173 * @adap: the adapter
3174 * @kbps: target rate in Kbps
3175 * @sched: the scheduler index
3177 * Configure a Tx HW scheduler for the target rate.
3179 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3181 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3182 unsigned int clk = adap->params.vpd.cclk * 1000;
3183 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Search (clocks-per-tick, bytes-per-tick) pairs for the combination
 * whose achieved rate is closest to the requested rate. */
3186 kbps *= 125; /* -> bytes */
3187 for (cpt = 1; cpt <= 255; cpt++) {
3189 bpt = (kbps + tps / 2) / tps;
3190 if (bpt > 0 && bpt <= 255) {
3192 delta = v >= kbps ? v - kbps : kbps - v;
3193 if (delta < mindelta) {
3198 } else if (selected_cpt)
/* Two schedulers share each rate-limit register: even in the low half,
 * odd in the high half. */
3204 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3205 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3206 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3208 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3210 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3211 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3216 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3217 * @adap: the adapter
3218 * @sched: the scheduler index
3219 * @ipg: the interpacket delay in tenths of nanoseconds
3221 * Set the interpacket delay for a HW packet rate scheduler.
3223 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3225 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3227 /* convert ipg to nearest number of core clocks */
3228 ipg *= core_ticks_per_usec(adap);
3229 ipg = (ipg + 5000) / 10000;
/* Two schedulers per register: odd scheduler uses the high 16 bits. */
3233 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3234 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3236 v = (v & 0xffff) | (ipg << 16);
3238 v = (v & 0xffff0000) | ipg;
3239 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
3240 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3245 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3246 * @adap: the adapter
3247 * @sched: the scheduler index
3248 * @kbps: the byte rate in Kbps
3249 * @ipg: the interpacket delay in tenths of nanoseconds
3251 * Return the current configuration of a HW Tx scheduler.
3253 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3256 unsigned int v, addr, bpt, cpt;
/* Recover the rate from the stored (bytes-per-tick, clocks-per-tick)
 * pair; a zero cpt means the scheduler is disabled. */
3259 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3260 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3261 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3264 bpt = (v >> 8) & 0xff;
3267 *kbps = 0; /* scheduler disabled */
3269 v = (adap->params.vpd.cclk * 1000) / cpt;
3270 *kbps = (v * bpt) / 125;
/* Convert the stored IPG back from core clocks to tenths of ns. */
3274 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3275 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3276 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3280 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3285 * tp_init - configure TP
3286 * @adap: the adapter
3287 * @p: TP configuration parameters
3289 * Initializes the TP HW module.
3291 static int tp_init(adapter_t *adap, const struct tp_params *p)
3296 t3_set_vlan_accel(adap, 3, 0);
/* Offload adapters additionally program the TCP timers and kick off the
 * free-list structure initialization, waiting for it to complete. */
3298 if (is_offload(adap)) {
3299 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3300 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3301 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3304 CH_ERR(adap, "TP initialization timed out\n");
3308 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3313 * t3_mps_set_active_ports - configure port failover
3314 * @adap: the adapter
3315 * @port_mask: bitmap of active ports
3317 * Sets the active ports according to the supplied bitmap.
3319 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3321 if (port_mask & ~((1 << adap->params.nports) - 1))
3323 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3324 port_mask << S_PORT0ACTIVE);
3329 * chan_init_hw - channel-dependent HW initialization
3330 * @adap: the adapter
3331 * @chan_map: bitmap of Tx channels being used
3333 * Perform the bits of HW initialization that are dependent on the Tx
3334 * channels being used.
3336 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3340 if (chan_map != 3) { /* one channel */
3341 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3342 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3343 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3344 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3345 F_TPTXPORT1EN | F_PORT1ACTIVE));
3346 t3_write_reg(adap, A_PM1_TX_CFG,
3347 chan_map == 1 ? 0xffffffff : 0);
3349 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3350 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3351 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3352 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3353 } else { /* two channels */
3354 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3355 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3356 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3357 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3358 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3359 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3361 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3362 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3363 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3364 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3365 for (i = 0; i < 16; i++)
3366 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3367 (i << 16) | 0x1010);
3368 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3369 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3373 static int calibrate_xgm(adapter_t *adapter)
3375 if (uses_xaui(adapter)) {
3378 for (i = 0; i < 5; ++i) {
3379 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3380 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3382 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3383 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3384 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3385 V_XAUIIMP(G_CALIMP(v) >> 2));
3389 CH_ERR(adapter, "MAC calibration failed\n");
3392 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3393 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3394 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3395 F_XGM_IMPSETUPDATE);
3400 static void calibrate_xgm_t3b(adapter_t *adapter)
3402 if (!uses_xaui(adapter)) {
3403 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3404 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3405 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3406 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3407 F_XGM_IMPSETUPDATE);
3408 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3410 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3411 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* MC7 (external memory controller) timing parameters, in controller clocks. */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active to precharge delay */
	unsigned char ActToRdWrDly;	/* active to read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write to read delay */
	unsigned char RdToWrDly;	/* read to write delay */
};
3426 * Write a value to a register and check that the write completed. These
3427 * writes normally complete in a cycle or two, so one read should suffice.
3428 * The very first read exists to flush the posted write to the device.
3430 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3432 t3_write_reg(adapter, addr, val);
3433 (void) t3_read_reg(adapter, addr); /* flush */
3434 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3436 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3440 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3442 static const unsigned int mc7_mode[] = {
3443 0x632, 0x642, 0x652, 0x432, 0x442
3445 static const struct mc7_timing_params mc7_timings[] = {
3446 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3447 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3448 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3449 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3450 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3454 unsigned int width, density, slow, attempts;
3455 adapter_t *adapter = mc7->adapter;
3456 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3461 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3462 slow = val & F_SLOW;
3463 width = G_WIDTH(val);
3464 density = G_DEN(val);
3466 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3467 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3471 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3472 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3474 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3475 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3476 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3482 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3483 V_ACTTOPREDLY(p->ActToPreDly) |
3484 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3485 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3486 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3488 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3489 val | F_CLKEN | F_TERM150);
3490 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3493 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3498 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3499 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3500 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3501 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3505 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3506 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3511 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3512 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3513 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3514 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3515 mc7_mode[mem_type]) ||
3516 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3517 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3520 /* clock value is in KHz */
3521 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3522 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3524 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3525 F_PERREFEN | V_PREREFDIV(mc7_clock));
3526 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3528 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3529 F_ECCGENEN | F_ECCCHKEN);
3530 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3531 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3532 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3533 (mc7->size << width) - 1);
3534 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3535 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3540 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3541 } while ((val & F_BUSY) && --attempts);
3543 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3547 /* Enable normal memory accesses. */
3548 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3555 static void config_pcie(adapter_t *adap)
3557 static const u16 ack_lat[4][6] = {
3558 { 237, 416, 559, 1071, 2095, 4143 },
3559 { 128, 217, 289, 545, 1057, 2081 },
3560 { 73, 118, 154, 282, 538, 1050 },
3561 { 67, 107, 86, 150, 278, 534 }
3563 static const u16 rpl_tmr[4][6] = {
3564 { 711, 1248, 1677, 3213, 6285, 12429 },
3565 { 384, 651, 867, 1635, 3171, 6243 },
3566 { 219, 354, 462, 846, 1614, 3150 },
3567 { 201, 321, 258, 450, 834, 1602 }
3571 unsigned int log2_width, pldsize;
3572 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3574 t3_os_pci_read_config_2(adap,
3575 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3577 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3579 t3_os_pci_read_config_2(adap,
3580 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3583 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3584 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3585 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3586 log2_width = fls(adap->params.pci.width) - 1;
3587 acklat = ack_lat[log2_width][pldsize];
3588 if (val & 1) /* check LOsEnable */
3589 acklat += fst_trn_tx * 4;
3590 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3592 if (adap->params.rev == 0)
3593 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3594 V_T3A_ACKLAT(M_T3A_ACKLAT),
3595 V_T3A_ACKLAT(acklat));
3597 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3600 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3601 V_REPLAYLMT(rpllmt));
3603 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3604 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3605 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3609 * t3_init_hw - initialize and configure T3 HW modules
3610 * @adapter: the adapter
3611 * @fw_params: initial parameters to pass to firmware (optional)
3613 * Initialize and configure T3 HW modules. This performs the
3614 * initialization steps that need to be done once after a card is reset.
3615 * MAC and PHY initialization is handled separarely whenever a port is
3618 * @fw_params are passed to FW and their value is platform dependent.
3619 * Only the top 8 bits are available for use, the rest must be 0.
3621 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3623 int err = -EIO, attempts, i;
3624 const struct vpd_params *vpd = &adapter->params.vpd;
3626 if (adapter->params.rev > 0)
3627 calibrate_xgm_t3b(adapter);
3628 else if (calibrate_xgm(adapter))
3631 if (adapter->params.nports > 2)
3632 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3635 partition_mem(adapter, &adapter->params.tp);
3637 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3638 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3639 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3640 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3641 adapter->params.mc5.nfilters,
3642 adapter->params.mc5.nroutes))
3645 for (i = 0; i < 32; i++)
3646 if (clear_sge_ctxt(adapter, i, F_CQ))
3650 if (tp_init(adapter, &adapter->params.tp))
3653 #ifdef CONFIG_CHELSIO_T3_CORE
3654 t3_tp_set_coalescing_size(adapter,
3655 min(adapter->params.sge.max_pkt_size,
3656 MAX_RX_COALESCING_LEN), 1);
3657 t3_tp_set_max_rxsize(adapter,
3658 min(adapter->params.sge.max_pkt_size, 16384U));
3659 ulp_config(adapter, &adapter->params.tp);
3661 if (is_pcie(adapter))
3662 config_pcie(adapter);
3664 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3665 F_DMASTOPEN | F_CLIDECEN);
3667 if (adapter->params.rev == T3_REV_C)
3668 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3669 F_CFG_CQE_SOP_MASK);
3671 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3672 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3673 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3674 chan_init_hw(adapter, adapter->params.chan_map);
3675 t3_sge_init(adapter, &adapter->params.sge);
3677 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3679 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3680 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3681 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3682 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3685 do { /* wait for uP to initialize */
3687 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3689 CH_ERR(adapter, "uP initialization timed out\n");
3699 * get_pci_mode - determine a card's PCI mode
3700 * @adapter: the adapter
3701 * @p: where to store the PCI settings
3703 * Determines a card's PCI mode and associated parameters, such as speed
3706 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3708 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3709 u32 pci_mode, pcie_cap;
3711 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3715 p->variant = PCI_VARIANT_PCIE;
3716 p->pcie_cap_addr = pcie_cap;
3717 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3719 p->width = (val >> 4) & 0x3f;
3723 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3724 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3725 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3726 pci_mode = G_PCIXINITPAT(pci_mode);
3728 p->variant = PCI_VARIANT_PCI;
3729 else if (pci_mode < 4)
3730 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3731 else if (pci_mode < 8)
3732 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3734 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3738 * init_link_config - initialize a link's SW state
3739 * @lc: structure holding the link state
3740 * @caps: link capabilities
3742 * Initializes the SW state maintained for each link, including the link's
3743 * capabilities and default speed/duplex/flow-control/autonegotiation
3746 static void __devinit init_link_config(struct link_config *lc,
3749 lc->supported = caps;
3750 lc->requested_speed = lc->speed = SPEED_INVALID;
3751 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3752 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3753 if (lc->supported & SUPPORTED_Autoneg) {
3754 lc->advertising = lc->supported;
3755 lc->autoneg = AUTONEG_ENABLE;
3756 lc->requested_fc |= PAUSE_AUTONEG;
3758 lc->advertising = 0;
3759 lc->autoneg = AUTONEG_DISABLE;
3764 * mc7_calc_size - calculate MC7 memory size
3765 * @cfg: the MC7 configuration
3767 * Calculates the size of an MC7 memory in bytes from the value of its
3768 * configuration register.
3770 static unsigned int __devinit mc7_calc_size(u32 cfg)
3772 unsigned int width = G_WIDTH(cfg);
3773 unsigned int banks = !!(cfg & F_BKS) + 1;
3774 unsigned int org = !!(cfg & F_ORG) + 1;
3775 unsigned int density = G_DEN(cfg);
3776 unsigned int MBs = ((256 << density) * banks) / (org << width);
3781 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3782 unsigned int base_addr, const char *name)
3786 mc7->adapter = adapter;
3788 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3789 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3790 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3791 mc7->width = G_WIDTH(cfg);
3794 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3796 mac->adapter = adapter;
3797 mac->multiport = adapter->params.nports > 2;
3798 if (mac->multiport) {
3799 mac->ext_port = (unsigned char)index;
3805 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3807 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3808 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3809 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3810 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3816 * early_hw_init - HW initialization done at card detection time
3817 * @adapter: the adapter
3818 * @ai: contains information about the adapter type and properties
3820 * Perfoms the part of HW initialization that is done early on when the
3821 * driver first detecs the card. Most of the HW state is initialized
3822 * lazily later on when a port or an offload function are first used.
3824 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3826 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3829 mi1_init(adapter, ai);
3830 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3831 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3832 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3833 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3834 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3835 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3837 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3840 /* Enable MAC clocks so we can access the registers */
3841 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3842 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3844 val |= F_CLKDIVRESET_;
3845 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3846 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3847 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3848 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3852 * t3_reset_adapter - reset the adapter
3853 * @adapter: the adapter
3855 * Reset the adapter.
3857 static int t3_reset_adapter(adapter_t *adapter)
3859 int i, save_and_restore_pcie =
3860 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3863 if (save_and_restore_pcie)
3864 t3_os_pci_save_state(adapter);
3865 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3868 * Delay. Give Some time to device to reset fully.
3869 * XXX The delay time should be modified.
3871 for (i = 0; i < 10; i++) {
3873 t3_os_pci_read_config_2(adapter, 0x00, &devid);
3874 if (devid == 0x1425)
3878 if (devid != 0x1425)
3881 if (save_and_restore_pcie)
3882 t3_os_pci_restore_state(adapter);
3886 static int init_parity(adapter_t *adap)
3890 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3893 for (err = i = 0; !err && i < 16; i++)
3894 err = clear_sge_ctxt(adap, i, F_EGRESS);
3895 for (i = 0xfff0; !err && i <= 0xffff; i++)
3896 err = clear_sge_ctxt(adap, i, F_EGRESS);
3897 for (i = 0; !err && i < SGE_QSETS; i++)
3898 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3902 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3903 for (i = 0; i < 4; i++)
3904 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3905 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3906 F_IBQDBGWR | V_IBQDBGQID(i) |
3907 V_IBQDBGADDR(addr));
3908 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3909 F_IBQDBGBUSY, 0, 2, 1);
3917 * t3_prep_adapter - prepare SW and HW for operation
3918 * @adapter: the adapter
3919 * @ai: contains information about the adapter type and properties
3921 * Initialize adapter SW state for the various HW modules, set initial
3922 * values for some adapter tunables, take PHYs out of reset, and
3923 * initialize the MDIO interface.
3925 int __devinit t3_prep_adapter(adapter_t *adapter,
3926 const struct adapter_info *ai, int reset)
3929 unsigned int i, j = 0;
3931 get_pci_mode(adapter, &adapter->params.pci);
3933 adapter->params.info = ai;
3934 adapter->params.nports = ai->nports0 + ai->nports1;
3935 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3936 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3937 adapter->params.linkpoll_period = 0;
3938 if (adapter->params.nports > 2)
3939 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3941 adapter->params.stats_update_period = is_10G(adapter) ?
3942 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3943 adapter->params.pci.vpd_cap_addr =
3944 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3946 ret = get_vpd_params(adapter, &adapter->params.vpd);
3950 if (reset && t3_reset_adapter(adapter))
3953 t3_sge_prep(adapter, &adapter->params.sge);
3955 if (adapter->params.vpd.mclk) {
3956 struct tp_params *p = &adapter->params.tp;
3958 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3959 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3960 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3962 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3963 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3964 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3965 p->cm_size = t3_mc7_size(&adapter->cm);
3966 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3967 p->chan_tx_size = p->pmtx_size / p->nchan;
3968 p->rx_pg_size = 64 * 1024;
3969 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3970 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3971 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3972 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3973 adapter->params.rev > 0 ? 12 : 6;
3974 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3976 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3979 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3980 t3_mc7_size(&adapter->pmtx) &&
3981 t3_mc7_size(&adapter->cm);
3983 if (is_offload(adapter)) {
3984 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3985 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3986 DEFAULT_NFILTERS : 0;
3987 adapter->params.mc5.nroutes = 0;
3988 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3990 #ifdef CONFIG_CHELSIO_T3_CORE
3991 init_mtus(adapter->params.mtus);
3992 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3996 early_hw_init(adapter, ai);
3997 ret = init_parity(adapter);
4001 if (adapter->params.nports > 2 &&
4002 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4005 for_each_port(adapter, i) {
4007 const struct port_type_info *pti;
4008 struct port_info *p = adap2pinfo(adapter, i);
4010 while (!adapter->params.vpd.port_type[j])
4013 pti = &port_types[adapter->params.vpd.port_type[j]];
4014 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
4018 mac_prep(&p->mac, adapter, j);
4022 * The VPD EEPROM stores the base Ethernet address for the
4023 * card. A port's address is derived from the base by adding
4024 * the port's index to the base's low octet.
4026 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4027 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4029 t3_os_set_hw_addr(adapter, i, hw_addr);
4030 init_link_config(&p->link_config, p->phy.caps);
4031 p->phy.ops->power_down(&p->phy, 1);
4032 if (!(p->phy.caps & SUPPORTED_IRQ))
4033 adapter->params.linkpoll_period = 10;
4040 * t3_reinit_adapter - prepare HW for operation again
4041 * @adapter: the adapter
4043 * Put HW in the same state as @t3_prep_adapter without any changes to
4044 * SW state. This is a cut down version of @t3_prep_adapter intended
4045 * to be used after events that wipe out HW state but preserve SW state,
4046 * e.g., EEH. The device must be reset before calling this.
4048 int t3_reinit_adapter(adapter_t *adap)
4053 early_hw_init(adap, adap->params.info);
4054 ret = init_parity(adap);
4058 if (adap->params.nports > 2 &&
4059 (ret = t3_vsc7323_init(adap, adap->params.nports)))
4062 for_each_port(adap, i) {
4063 const struct port_type_info *pti;
4064 struct port_info *p = adap2pinfo(adap, i);
4066 while (!adap->params.vpd.port_type[++j])
4069 pti = &port_types[adap->params.vpd.port_type[j]];
4070 ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
4073 p->phy.ops->power_down(&p->phy, 1);
4078 void t3_led_ready(adapter_t *adapter)
4080 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4084 void t3_port_failover(adapter_t *adapter, int port)
4088 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4089 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4093 void t3_failover_done(adapter_t *adapter, int port)
4095 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4096 F_PORT0ACTIVE | F_PORT1ACTIVE);
4099 void t3_failover_clear(adapter_t *adapter)
4101 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4102 F_PORT0ACTIVE | F_PORT1ACTIVE);