1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <cxgb_include.h>
37 #include <dev/cxgb/cxgb_include.h>
41 #define msleep t3_os_sleep
44 * t3_wait_op_done_val - wait until an operation is completed
45 * @adapter: the adapter performing the operation
46 * @reg: the register to check for completion
47 * @mask: a single-bit field within @reg that indicates completion
48 * @polarity: the value of the field when the operation is completed
49 * @attempts: number of check iterations
50 * @delay: delay in usecs between iterations
51 * @valp: where to store the value of the register at completion time
53 * Wait until an operation is completed by checking a bit in a register
54 * up to @attempts times. If @valp is not NULL the value of the register
55 * at the time it indicated completion is stored there. Returns 0 if the
56 * operation completes and -EAGAIN otherwise.
/*
 * Polls @reg until the bit(s) in @mask match @polarity, storing the final
 * register value in *valp when non-NULL (per the kernel-doc above).
 * NOTE(review): the retry loop, delay handling, and the -EAGAIN failure
 * return are elided in this extract — confirm against the full source.
 */
58 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
59 int attempts, int delay, u32 *valp)
62 u32 val = t3_read_reg(adapter, reg);
64 if (!!(val & mask) == polarity) {
77 * t3_write_regs - write a bunch of registers
78 * @adapter: the adapter to program
79 * @p: an array of register address/register value pairs
80 * @n: the number of address/value pairs
81 * @offset: register address offset
83 * Takes an array of register address/register value pairs and writes each
84 * value to the corresponding register. Register addresses are adjusted
85 * by the supplied offset.
/*
 * Writes each address/value pair in @p (addresses adjusted by @offset).
 * NOTE(review): the loop header and body braces are elided in this extract.
 */
87 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
91 t3_write_reg(adapter, p->reg_addr + offset, p->val);
97 * t3_set_reg_field - set a register field to a value
98 * @adapter: the adapter to program
99 * @addr: the register address
100 * @mask: specifies the portion of the register to modify
101 * @val: the new value for the register field
103 * Sets a register field specified by the supplied mask to the
/* Read-modify-write: clear the @mask field, OR in @val, then read back to
 * flush the posted write. */
106 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
108 u32 v = t3_read_reg(adapter, addr) & ~mask;
110 t3_write_reg(adapter, addr, v | val);
111 (void) t3_read_reg(adapter, addr); /* flush */
115 * t3_read_indirect - read indirectly addressed registers
117 * @addr_reg: register holding the indirect address
118 * @data_reg: register holding the value of the indirect register
119 * @vals: where the read register values are stored
120 * @start_idx: index of first indirect register to read
121 * @nregs: how many indirect registers to read
123 * Reads registers that are accessed indirectly through an address/data
/*
 * Reads @nregs indirect registers via the @addr_reg/@data_reg pair.
 * NOTE(review): the enclosing loop over start_idx..start_idx+nregs is
 * elided in this extract; only one iteration's body is visible.
 */
126 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
127 unsigned int data_reg, u32 *vals, unsigned int nregs,
128 unsigned int start_idx)
131 t3_write_reg(adap, addr_reg, start_idx);
132 *vals++ = t3_read_reg(adap, data_reg);
138 * t3_mc7_bd_read - read from MC7 through backdoor accesses
139 * @mc7: identifies MC7 to read from
140 * @start: index of first 64-bit word to read
141 * @n: number of 64-bit words to read
142 * @buf: where to store the read result
144 * Read n 64-bit words from MC7 starting at word start, using backdoor
/*
 * Backdoor read of @n 64-bit words from MC7 memory starting at @start.
 * shift[]/step[] are indexed by mc7->width (0..3) to reassemble a 64-bit
 * word from the per-lane DATA registers.
 * NOTE(review): several lines (error returns, outer loop, buffer stores,
 * closing braces) are elided in this extract.
 */
147 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
150 static int shift[] = { 0, 0, 16, 24 };
151 static int step[] = { 0, 32, 16, 8 };
153 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
154 adapter_t *adap = mc7->adapter;
156 if (start >= size64 || start + n > size64)
159 start *= (8 << mc7->width);
164 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
168 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
170 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
171 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
172 while ((val & F_BUSY) && attempts--)
173 val = t3_read_reg(adap,
174 mc7->offset + A_MC7_BD_OP);
178 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
179 if (mc7->width == 0) {
180 val64 = t3_read_reg(adap,
181 mc7->offset + A_MC7_BD_DATA0);
182 val64 |= (u64)val << 32;
185 val >>= shift[mc7->width];
186 val64 |= (u64)val << (step[mc7->width] * i);
/*
 * Programs the MI1 (MDIO) configuration register: clock divider derived
 * from core clock / MDIO clock, plus board-specific invert/enable bits.
 * NOTE(review): the continuation of the val initializer and the body of
 * the !10G-capable branch are elided in this extract.
 */
198 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
200 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
201 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
204 if (!(ai->caps & SUPPORTED_10000baseT_Full))
206 t3_write_reg(adap, A_MI1_CFG, val);
209 #define MDIO_ATTEMPTS 20
212 * MI1 read/write operations for direct-addressed PHYs.
/*
 * Clause-22 (direct-addressed) MDIO read: program ADDR, issue OP(2)=read,
 * wait for !BUSY, then fetch DATA.  Runs under the MDIO lock.
 * NOTE(review): the MDIO_LOCK acquisition and the success check guarding
 * *valp are elided in this extract.
 */
214 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
215 int reg_addr, unsigned int *valp)
218 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
224 t3_write_reg(adapter, A_MI1_ADDR, addr);
225 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
226 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
228 *valp = t3_read_reg(adapter, A_MI1_DATA);
229 MDIO_UNLOCK(adapter);
/*
 * Clause-22 (direct-addressed) MDIO write: program ADDR and DATA, issue
 * OP(1)=write, then wait for !BUSY.  Runs under the MDIO lock.
 * NOTE(review): the MDIO_LOCK acquisition is elided in this extract.
 */
233 static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
234 int reg_addr, unsigned int val)
237 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
243 t3_write_reg(adapter, A_MI1_ADDR, addr);
244 t3_write_reg(adapter, A_MI1_DATA, val);
245 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
246 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
247 MDIO_UNLOCK(adapter);
/* Ops table for direct-addressed PHYs; initializer body elided in this
 * extract (presumably {mi1_read, mi1_write} — confirm in full source). */
251 static struct mdio_ops mi1_mdio_ops = {
257 * MI1 read/write operations for indirect-addressed PHYs.
/*
 * Clause-45 (indirect-addressed) MDIO read: write the register address via
 * OP(0)=address cycle, then issue OP(3)=read and fetch DATA.
 * NOTE(review): MDIO_LOCK, the ret checks between phases, and the
 * MDIO_ATTEMPTS argument of the second wait are elided in this extract.
 */
259 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
260 int reg_addr, unsigned int *valp)
263 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
266 t3_write_reg(adapter, A_MI1_ADDR, addr);
267 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
268 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
269 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
271 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
272 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
275 *valp = t3_read_reg(adapter, A_MI1_DATA);
277 MDIO_UNLOCK(adapter);
/*
 * Clause-45 (indirect-addressed) MDIO write: address cycle (OP 0), then a
 * data write cycle (OP 1).  Runs under the MDIO lock.
 * NOTE(review): MDIO_LOCK, the intermediate ret check, and the trailing
 * arguments of the second wait are elided in this extract.
 */
281 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
282 int reg_addr, unsigned int val)
285 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
288 t3_write_reg(adapter, A_MI1_ADDR, addr);
289 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
293 t3_write_reg(adapter, A_MI1_DATA, val);
294 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
295 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
298 MDIO_UNLOCK(adapter);
/* Ops table for indirect-addressed PHYs; initializer body elided in this
 * extract (presumably {mi1_ext_read, mi1_ext_write} — confirm). */
302 static struct mdio_ops mi1_mdio_ext_ops = {
308 * t3_mdio_change_bits - modify the value of a PHY register
309 * @phy: the PHY to operate on
310 * @mmd: the device address
311 * @reg: the register address
312 * @clear: what part of the register value to mask off
313 * @set: what part of the register value to set
315 * Changes the value of a PHY register by applying a mask to its current
316 * value and ORing the result with a new value.
/*
 * Read-modify-write of a PHY register: clear @clear bits, set @set bits.
 * NOTE(review): the signature continuation, the masking of 'clear' into
 * 'val', and the ret checks are elided in this extract.
 */
318 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
324 ret = mdio_read(phy, mmd, reg, &val);
327 ret = mdio_write(phy, mmd, reg, val | set);
333 * t3_phy_reset - reset a PHY block
334 * @phy: the PHY to operate on
335 * @mmd: the device address of the PHY block to reset
336 * @wait: how long to wait for the reset to complete in 1ms increments
338 * Resets a PHY block and optionally waits for the reset to complete.
339 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
/*
 * Triggers BMCR_RESET (clearing PDOWN) and, if @wait > 0, polls BMCR until
 * the reset self-clears.
 * NOTE(review): the do-loop header, per-iteration sleep, and error/timeout
 * returns are elided in this extract.
 */
342 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
347 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
352 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
358 } while (ctl && --wait);
364 * t3_phy_advertise - set the PHY advertisement registers for autoneg
365 * @phy: the PHY to operate on
366 * @advert: bitmap of capabilities the PHY should advertise
368 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
369 * requested capabilities.
/*
 * Translates ADVERTISED_* capability bits into MII_CTRL1000 (GigE) and
 * MII_ADVERTISE (10/100 + pause) register values and writes both.
 * NOTE(review): error-return checks after the read/write calls and the
 * re-initialization of val (with ADVERTISE_CSMA, presumably) before the
 * 10/100 section are elided in this extract.
 */
371 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
374 unsigned int val = 0;
376 err = mdio_read(phy, 0, MII_CTRL1000, &val);
380 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
381 if (advert & ADVERTISED_1000baseT_Half)
382 val |= ADVERTISE_1000HALF;
383 if (advert & ADVERTISED_1000baseT_Full)
384 val |= ADVERTISE_1000FULL;
386 err = mdio_write(phy, 0, MII_CTRL1000, val);
391 if (advert & ADVERTISED_10baseT_Half)
392 val |= ADVERTISE_10HALF;
393 if (advert & ADVERTISED_10baseT_Full)
394 val |= ADVERTISE_10FULL;
395 if (advert & ADVERTISED_100baseT_Half)
396 val |= ADVERTISE_100HALF;
397 if (advert & ADVERTISED_100baseT_Full)
398 val |= ADVERTISE_100FULL;
399 if (advert & ADVERTISED_Pause)
400 val |= ADVERTISE_PAUSE_CAP;
401 if (advert & ADVERTISED_Asym_Pause)
402 val |= ADVERTISE_PAUSE_ASYM;
403 return mdio_write(phy, 0, MII_ADVERTISE, val);
407 * t3_phy_advertise_fiber - set fiber PHY advertisement register
408 * @phy: the PHY to operate on
409 * @advert: bitmap of capabilities the PHY should advertise
411 * Sets a fiber PHY's advertisement register to advertise the
412 * requested capabilities.
/* Fiber variant: maps ADVERTISED_* bits to the 1000BASE-X advertisement
 * layout and writes MII_ADVERTISE in one shot. */
414 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
416 unsigned int val = 0;
418 if (advert & ADVERTISED_1000baseT_Half)
419 val |= ADVERTISE_1000XHALF;
420 if (advert & ADVERTISED_1000baseT_Full)
421 val |= ADVERTISE_1000XFULL;
422 if (advert & ADVERTISED_Pause)
423 val |= ADVERTISE_1000XPAUSE;
424 if (advert & ADVERTISED_Asym_Pause)
425 val |= ADVERTISE_1000XPSE_ASYM;
426 return mdio_write(phy, 0, MII_ADVERTISE, val);
430 * t3_set_phy_speed_duplex - force PHY speed and duplex
431 * @phy: the PHY to operate on
432 * @speed: requested PHY speed
433 * @duplex: requested PHY duplex
435 * Force a 10/100/1000 PHY's speed and duplex. This also disables
436 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
/*
 * Forces BMCR speed/duplex bits; disables autoneg except when forcing
 * 1000 Mb/s, where IEEE requires it.  Negative @speed/@duplex leave the
 * corresponding bits untouched (the guarding ifs are elided below).
 * NOTE(review): the 'if (speed >= 0)' / 'if (duplex >= 0)' guards and the
 * mdio_read error check are elided in this extract.
 */
438 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
443 err = mdio_read(phy, 0, MII_BMCR, &ctl);
448 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
449 if (speed == SPEED_100)
450 ctl |= BMCR_SPEED100;
451 else if (speed == SPEED_1000)
452 ctl |= BMCR_SPEED1000;
455 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
456 if (duplex == DUPLEX_FULL)
457 ctl |= BMCR_FULLDPLX;
459 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
460 ctl |= BMCR_ANENABLE;
461 return mdio_write(phy, 0, MII_BMCR, ctl);
/* Enables the PHY's LASI (link alarm) interrupt via the PMA/PMD device. */
464 int t3_phy_lasi_intr_enable(struct cphy *phy)
466 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
/* Disables the PHY's LASI interrupt. */
469 int t3_phy_lasi_intr_disable(struct cphy *phy)
471 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
/* Clears a pending LASI interrupt; LASI_STAT is read-to-clear, so the read
 * itself is the acknowledgment and the value is discarded. */
474 int t3_phy_lasi_intr_clear(struct cphy *phy)
478 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
/* Reads LASI status; bit 0 set means a link-state change occurred.
 * NOTE(review): the 'if (err) return err;' between read and use is elided
 * in this extract. */
481 int t3_phy_lasi_intr_handler(struct cphy *phy)
484 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
488 return (status & 1) ? cphy_cause_link_change : 0;
/*
 * Per-board configuration table: GPIO output-enable/value masks, external
 * PHY interrupt GPIO pins, link capabilities, MDIO ops, and product name.
 * Indexed by adapter id (see t3_get_adapter_info).
 * NOTE(review): the leading fields of each entry (port counts, mdc/mdien/
 * mdiinv values) are elided in this extract.
 */
491 static struct adapter_info t3_adap_info[] = {
493 F_GPIO2_OEN | F_GPIO4_OEN |
494 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
495 &mi1_mdio_ops, "Chelsio PE9000" },
497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio T302" },
501 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
502 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
503 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
504 &mi1_mdio_ext_ops, "Chelsio T310" },
506 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
507 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
508 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
509 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
510 &mi1_mdio_ext_ops, "Chelsio T320" },
512 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
513 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
514 { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
515 &mi1_mdio_ops, "Chelsio T304" },
519 * Return the adapter_info structure with a given index. Out-of-range indices
/* Bounds-checked lookup into t3_adap_info[]; NULL for out-of-range ids. */
522 const struct adapter_info *t3_get_adapter_info(unsigned int id)
524 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/*
 * Dispatch table mapping VPD port-type codes (see get_vpd_params) to the
 * per-PHY prep routine.  Index order is significant — it must match the
 * codes stored in the EEPROM.
 * NOTE(review): the first entry (index 0, likely a NULL/placeholder) and
 * the closing brace are elided in this extract.
 */
527 struct port_type_info {
528 int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
529 const struct mdio_ops *ops);
532 static struct port_type_info port_types[] = {
534 { t3_ael1002_phy_prep },
535 { t3_vsc8211_phy_prep },
536 { t3_mv88e1xxx_phy_prep },
537 { t3_xaui_direct_phy_prep },
538 { t3_ael2005_phy_prep },
539 { t3_qt2045_phy_prep },
540 { t3_ael1006_phy_prep },
541 { t3_tn1010_phy_prep },
/*
 * VPD_ENTRY(name, len) expands to the three-part PCI VPD keyword layout:
 * 2-byte keyword, 1-byte length, then len bytes of data.
 * The t3_vpd struct mirrors the on-EEPROM layout so it can be read with
 * raw 4-byte t3_seeprom_read calls (hence the trailing pad to a multiple
 * of 4).  NOTE(review): the struct's opening declaration line and leading
 * id/tag fields are elided in this extract.
 */
544 #define VPD_ENTRY(name, len) \
545 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
548 * Partial EEPROM Vital Product Data structure. Includes only the ID and
557 VPD_ENTRY(pn, 16); /* part number */
558 VPD_ENTRY(ec, 16); /* EC level */
559 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
560 VPD_ENTRY(na, 12); /* MAC address base */
561 VPD_ENTRY(cclk, 6); /* core clock */
562 VPD_ENTRY(mclk, 6); /* mem clock */
563 VPD_ENTRY(uclk, 6); /* uP clk */
564 VPD_ENTRY(mdc, 6); /* MDIO clk */
565 VPD_ENTRY(mt, 2); /* mem timing */
566 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
567 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
568 VPD_ENTRY(port0, 2); /* PHY0 complex */
569 VPD_ENTRY(port1, 2); /* PHY1 complex */
570 VPD_ENTRY(port2, 2); /* PHY2 complex */
571 VPD_ENTRY(port3, 2); /* PHY3 complex */
572 VPD_ENTRY(rv, 1); /* csum */
573 u32 pad; /* for multiple-of-4 sizing and alignment */
576 #define EEPROM_MAX_POLL 40
577 #define EEPROM_STAT_ADDR 0x4000
578 #define VPD_BASE 0xc00
581 * t3_seeprom_read - read a VPD EEPROM location
582 * @adapter: adapter to read
583 * @addr: EEPROM address
584 * @data: where to store the read data
586 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
587 * VPD ROM capability. A zero is written to the flag bit when the
588 * address is written to the control register. The hardware device will
589 * set the flag to 1 when 4 bytes have been read into the data register.
/*
 * Reads one aligned 32-bit word from VPD EEPROM via the PCI VPD capability:
 * write the address with flag=0, poll until the hardware sets the flag,
 * then read the data register and convert from little-endian.
 * NOTE(review): the do-loop header, inter-poll delay, and error-code
 * returns are elided in this extract.
 */
591 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
594 int attempts = EEPROM_MAX_POLL;
595 unsigned int base = adapter->params.pci.vpd_cap_addr;
597 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
600 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
603 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
604 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
606 if (!(val & PCI_VPD_ADDR_F)) {
607 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
610 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
611 *data = le32_to_cpu(*data);
616 * t3_seeprom_write - write a VPD EEPROM location
617 * @adapter: adapter to write
618 * @addr: EEPROM address
619 * @data: value to write
621 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
622 * VPD ROM capability.
/*
 * Writes one aligned 32-bit word to VPD EEPROM: write the data register,
 * write the address with the flag bit SET, then poll until hardware
 * clears the flag (opposite polarity of the read path).
 * NOTE(review): the cpu_to_le32 conversion argument, the do-loop header,
 * and the return statements are elided in this extract.
 */
624 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
627 int attempts = EEPROM_MAX_POLL;
628 unsigned int base = adapter->params.pci.vpd_cap_addr;
630 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
633 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
635 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
636 (u16)addr | PCI_VPD_ADDR_F)
639 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
640 } while ((val & PCI_VPD_ADDR_F) && --attempts);
642 if (val & PCI_VPD_ADDR_F) {
643 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
650 * t3_seeprom_wp - enable/disable EEPROM write protection
651 * @adapter: the adapter
652 * @enable: 1 to enable write protection, 0 to disable it
654 * Enables or disables write protection on the serial EEPROM.
/* Toggles EEPROM write protection by writing the status word: 0xc arms
 * protection, 0 disarms it. */
656 int t3_seeprom_wp(adapter_t *adapter, int enable)
658 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
662 * Convert a character holding a hex digit to a number.
/* Converts one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value 0-15.
 * Caller must pass a valid hex digit; no validation is performed. */
664 static unsigned int hex2int(unsigned char c)
666 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
670 * get_vpd_params - read VPD parameters from VPD EEPROM
671 * @adapter: adapter to read
672 * @p: where to store the parameters
674 * Reads card parameters stored in VPD EEPROM.
/*
 * Populates @p from the EEPROM's VPD block.  Probes VPD_BASE first; if the
 * 0x82 ID tag isn't found there, falls back to offset 0 (early cards).
 * Clock fields are stored as ASCII decimal, XAUI configs as ASCII hex,
 * and the MAC base address as hex digit pairs.
 * NOTE(review): ret checks, the else keyword pairing the rev-0 fallback
 * with the normal port-type path, and the final return are elided in this
 * extract.
 */
676 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
682 * Card information is normally at VPD_BASE but some early cards had
685 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
688 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
690 for (i = 0; i < sizeof(vpd); i += 4) {
691 ret = t3_seeprom_read(adapter, addr + i,
692 (u32 *)((u8 *)&vpd + i));
697 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
698 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
699 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
700 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
701 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
702 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
704 /* Old eeproms didn't have port information */
705 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
706 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
707 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
709 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
710 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
711 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
712 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
713 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
714 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
717 for (i = 0; i < 6; i++)
718 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
719 hex2int(vpd.na_data[2 * i + 1]);
723 /* BIOS boot header */
724 typedef struct boot_header_s {
725 u8 signature[2]; /* signature */
726 u8 length; /* image length (include header) */
727 u8 offset[4]; /* initialization vector */
728 u8 reserved[19]; /* reserved */
729 u8 exheader[2]; /* offset to expansion header */
732 /* serial flash and firmware constants */
734 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
735 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
736 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
738 /* flash command opcodes */
739 SF_PROG_PAGE = 2, /* program page */
740 SF_WR_DISABLE = 4, /* disable writes */
741 SF_RD_STATUS = 5, /* read status register */
742 SF_WR_ENABLE = 6, /* enable writes */
743 SF_RD_DATA_FAST = 0xb, /* read flash */
744 SF_ERASE_SECTOR = 0xd8, /* erase sector */
746 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
747 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
748 FW_MIN_SIZE = 8, /* at least version and csum */
749 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
751 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
752 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
753 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
754 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
755 BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment */
759 * sf1_read - read data from the serial flash
760 * @adapter: the adapter
761 * @byte_cnt: number of bytes to read
762 * @cont: whether another operation will be chained
763 * @valp: where to store the read data
765 * Reads up to 4 bytes of data from the serial flash. The location of
766 * the read needs to be specified prior to calling this by issuing the
767 * appropriate commands to the serial flash.
/*
 * Low-level SF1 read: validates byte count, checks the engine isn't busy,
 * kicks off the transfer, waits for completion, then reads SF_DATA.
 * NOTE(review): the EINVAL/EBUSY error returns and the success check
 * guarding *valp are elided in this extract.
 */
769 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
774 if (!byte_cnt || byte_cnt > 4)
776 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
778 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
779 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
781 *valp = t3_read_reg(adapter, A_SF_DATA);
786 * sf1_write - write data to the serial flash
787 * @adapter: the adapter
788 * @byte_cnt: number of bytes to write
789 * @cont: whether another operation will be chained
790 * @val: value to write
792 * Writes up to 4 bytes of data to the serial flash. The location of
793 * the write needs to be specified prior to calling this by issuing the
794 * appropriate commands to the serial flash.
/*
 * Low-level SF1 write: loads SF_DATA, starts the op (V_OP(1) = write),
 * and waits for the engine to go idle.
 * NOTE(review): the EINVAL/EBUSY error returns are elided in this extract.
 */
796 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
799 if (!byte_cnt || byte_cnt > 4)
801 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
803 t3_write_reg(adapter, A_SF_DATA, val);
804 t3_write_reg(adapter, A_SF_OP,
805 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
806 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
810 * flash_wait_op - wait for a flash operation to complete
811 * @adapter: the adapter
812 * @attempts: max number of polls of the status register
813 * @delay: delay between polls in ms
815 * Wait for a flash operation to complete by polling the status register.
/*
 * Polls the flash status register (SF_RD_STATUS) until the device is
 * idle, sleeping @delay ms between @attempts polls.
 * NOTE(review): the loop header, the busy-bit test, the msleep call, and
 * the timeout return are elided in this extract.
 */
817 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
823 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
824 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
836 * t3_read_flash - read words from serial flash
837 * @adapter: the adapter
838 * @addr: the start address for the read
839 * @nwords: how many 32-bit words to read
840 * @data: where to store the read data
841 * @byte_oriented: whether to store data as bytes or as words
843 * Read the specified number of 32-bit words from the serial flash.
844 * If @byte_oriented is set the read data is stored as a byte array
845 * (i.e., big-endian), otherwise as 32-bit words in the platform's
/*
 * Reads @nwords 32-bit words from flash starting at @addr using the
 * fast-read command (address byte-swapped into the command word).  With
 * @byte_oriented, words are stored big-endian via htonl.
 * NOTE(review): error returns and the byte_oriented guard around the
 * htonl conversion are elided in this extract.
 */
848 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
849 u32 *data, int byte_oriented)
853 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
856 addr = swab32(addr) | SF_RD_DATA_FAST;
858 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
859 (ret = sf1_read(adapter, 1, 1, data)) != 0)
862 for ( ; nwords; nwords--, data++) {
863 ret = sf1_read(adapter, 4, nwords > 1, data);
867 *data = htonl(*data);
873 * t3_write_flash - write up to a page of data to the serial flash
874 * @adapter: the adapter
875 * @addr: the start address to write
876 * @n: length of data to write
877 * @data: the data to write
878 * @byte_oriented: whether to store data as bytes or as words
880 * Writes up to a page of data (256 bytes) to the serial flash starting
881 * at the given address.
882 * If @byte_oriented is set the write data is stored as a 32-bit
883 * big-endian array, otherwise in the processor's native endianness.
/*
 * Programs up to one 256-byte flash page at @addr, waits for completion,
 * then reads the page back and memcmp-verifies the write.
 * NOTE(review): the byte_oriented parameter line, chunk-size computation,
 * per-word endianness handling, data advance, and error returns are all
 * elided in this extract.
 */
886 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
887 unsigned int n, const u8 *data,
892 unsigned int c, left, val, offset = addr & 0xff;
894 if (addr + n > SF_SIZE || offset + n > 256)
897 val = swab32(addr) | SF_PROG_PAGE;
899 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
900 (ret = sf1_write(adapter, 4, 1, val)) != 0)
903 for (left = n; left; left -= c) {
905 val = *(const u32*)data;
910 ret = sf1_write(adapter, c, c != left, val);
914 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
917 /* Read the page to verify the write succeeded */
918 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
923 if (memcmp(data - n, (u8 *)buf + offset, n))
929 * t3_get_tp_version - read the tp sram version
930 * @adapter: the adapter
931 * @vers: where to place the version
933 * Reads the protocol sram version from sram.
/*
 * Asks the TP microengine for the loaded SRAM version: write 0 to the
 * embedded-op field, wait for completion, read back FIELD1.
 * NOTE(review): the wait's completion mask/arguments and the ret check
 * are elided in this extract.
 */
935 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
939 /* Get version loaded in SRAM */
940 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
941 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
946 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
952 * t3_check_tpsram_version - read the tp sram version
953 * @adapter: the adapter
/*
 * Compares the TP SRAM version against the driver's compiled-in
 * TP_VERSION_MAJOR/MINOR.  T3_REV_A parts are exempt (early path at the
 * top).  A major mismatch is fatal; the second CH_ERR branch covers the
 * minor-mismatch case.
 * NOTE(review): returns, the *must_load updates, and the else pairing of
 * the two CH_ERR branches are elided in this extract.
 */
956 int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
960 unsigned int major, minor;
962 if (adapter->params.rev == T3_REV_A)
967 ret = t3_get_tp_version(adapter, &vers);
971 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
973 major = G_TP_VERSION_MAJOR(vers);
974 minor = G_TP_VERSION_MINOR(vers);
976 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
979 if (major != TP_VERSION_MAJOR)
980 CH_ERR(adapter, "found wrong TP version (%u.%u), "
981 "driver needs version %d.%d\n", major, minor,
982 TP_VERSION_MAJOR, TP_VERSION_MINOR);
985 CH_ERR(adapter, "found wrong TP version (%u.%u), "
986 "driver compiled for version %d.%d\n", major, minor,
987 TP_VERSION_MAJOR, TP_VERSION_MINOR);
993 * t3_check_tpsram - check if provided protocol SRAM
994 * is compatible with this driver
995 * @adapter: the adapter
996 * @tp_sram: the firmware image to write
999 * Checks if an adapter's tp sram is compatible with the driver.
1000 * Returns 0 if the versions are compatible, a negative error otherwise.
/*
 * Verifies a TP SRAM image's integrity: the 32-bit one's-complement sum
 * of all big-endian words must equal 0xffffffff.
 * NOTE(review): the CH_ERR continuation, the error return, and the final
 * success return are elided in this extract.
 */
1002 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1006 const u32 *p = (const u32 *)tp_sram;
1008 /* Verify checksum */
1009 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1010 csum += ntohl(p[i]);
1011 if (csum != 0xffffffff) {
1012 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1020 enum fw_version_type {
1026 * t3_get_fw_version - read the firmware version
1027 * @adapter: the adapter
1028 * @vers: where to place the version
1030 * Reads the FW version from flash.
/* Fetches the 32-bit FW version word from its fixed flash address. */
1032 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1034 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1038 * t3_check_fw_version - check if the FW is compatible with this driver
1039 * @adapter: the adapter
1041 * Checks if an adapter's FW is compatible with the driver. Returns 0
1042 * if the versions are compatible, a negative error otherwise.
/*
 * Validates the flashed FW against the driver's compiled-in version.
 * Exact type+major+minor match is OK; wrong major is an error; an older
 * minor warns but tolerates; a newer minor also only warns.
 * NOTE(review): returns, *must_load updates, and the 'else' introducing
 * the newer-FW warning branch are elided in this extract.
 */
1044 int t3_check_fw_version(adapter_t *adapter, int *must_load)
1048 unsigned int type, major, minor;
1051 ret = t3_get_fw_version(adapter, &vers);
1055 type = G_FW_VERSION_TYPE(vers);
1056 major = G_FW_VERSION_MAJOR(vers);
1057 minor = G_FW_VERSION_MINOR(vers);
1059 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1060 minor == FW_VERSION_MINOR)
1063 if (major != FW_VERSION_MAJOR)
1064 CH_ERR(adapter, "found wrong FW version(%u.%u), "
1065 "driver needs version %u.%u\n", major, minor,
1066 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1067 else if ((int)minor < FW_VERSION_MINOR) {
1069 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1070 "driver compiled for version %u.%u\n", major, minor,
1071 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1073 CH_WARN(adapter, "found newer FW version(%u.%u), "
1074 "driver compiled for version %u.%u\n", major, minor,
1075 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1082 * t3_flash_erase_sectors - erase a range of flash sectors
1083 * @adapter: the adapter
1084 * @start: the first sector to erase
1085 * @end: the last sector to erase
1087 * Erases the sectors in the given range.
/*
 * Erases flash sectors @start..@end inclusive: write-enable, issue
 * SF_ERASE_SECTOR with the sector number shifted into the address field,
 * then wait (up to 5 polls, 500 ms apart) for completion.
 * NOTE(review): the error return, the start++ advance, and the final
 * return are elided in this extract.
 */
1089 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1091 while (start <= end) {
1094 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1095 (ret = sf1_write(adapter, 4, 0,
1096 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1097 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1105 * t3_load_fw - download firmware
1106 * @adapter: the adapter
1107 * @fw_data: the firmware image to write
1110 * Write the supplied firmware image to the card's serial flash.
1111 * The FW image has the following sections: @size - 8 bytes of code and
1112 * data, followed by 4 bytes of FW version, followed by the 32-bit
1113 * 1's complement checksum of the whole image.
/*
 * Flashes a FW image: validate size and one's-complement checksum, erase
 * the FW sector, write the body in 256-byte pages, then write the 4-byte
 * version word last (at FW_VERS_ADDR) so a partial flash is detectable.
 * NOTE(review): EINVAL/EFBIG returns, ret checks, the addr advance in the
 * loop, and the final 'out:' error path structure are partially elided in
 * this extract.
 */
1115 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1119 const u32 *p = (const u32 *)fw_data;
1120 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1122 if ((size & 3) || size < FW_MIN_SIZE)
1124 if (size - 8 > FW_MAX_SIZE)
1127 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1128 csum += ntohl(p[i]);
1129 if (csum != 0xffffffff) {
1130 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1135 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1139 size -= 8; /* trim off version and checksum */
1140 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1141 unsigned int chunk_size = min(size, 256U);
1143 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1148 fw_data += chunk_size;
1152 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
1155 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1160 * t3_load_boot - download boot flash
1161 * @adapter: the adapter
1162 * @boot_data: the boot image to write
1165 * Write the supplied boot image to the card's serial flash.
1166 * The boot image has the following sections: a 28-byte header and the
/*
 * Flashes a BIOS boot image at sector 0: sanity-checks size bounds, the
 * 0xaa55 ROM signature, and that the header's length (in 512-byte units)
 * matches the image size, then erases the covered sectors and writes the
 * image in 256-byte pages.
 * NOTE(review): error-code returns, ret checks, and the addr/size
 * bookkeeping inside the write loop are elided in this extract.
 */
1169 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1171 boot_header_t *header = (boot_header_t *)boot_data;
1174 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1175 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1178 * Perform some primitive sanity testing to avoid accidentally
1179 * writing garbage over the boot sectors. We ought to check for
1180 * more but it's not worth it for now ...
1182 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1183 CH_ERR(adapter, "boot image too small/large\n");
1186 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1187 CH_ERR(adapter, "boot image missing signature\n");
1190 if (header->length * BOOT_SIZE_INC != size) {
1191 CH_ERR(adapter, "boot image header length != image length\n");
1195 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1199 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1200 unsigned int chunk_size = min(size, 256U);
1202 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1207 boot_data += chunk_size;
1213 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1217 #define CIM_CTL_BASE 0x2000
1220 * t3_cim_ctl_blk_read - read a block from CIM control region
1221 * @adap: the adapter
1222 * @addr: the start address within the CIM control region
1223 * @n: number of words to read
1224 * @valp: where to store the result
1226 * Reads a block of 4-byte words from the CIM control region.
/*
 * Reads @n consecutive 32-bit words from the CIM control region: for each
 * word, write the target address into the host-access control register,
 * wait for HOSTBUSY to clear, then read the data register.  Fails fast
 * with busy if another host access is already in flight.
 * NOTE(review): the EBUSY return value, the wait's trailing arguments,
 * and the final return are elided in this extract.
 */
1228 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1233 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1236 for ( ; !ret && n--; addr += 4) {
1237 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1238 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1241 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1247 * t3_link_changed - handle interface link changes
1248 * @adapter: the adapter
1249 * @port_id: the port index that changed link state
1251 * Called when a port's link settings change to propagate the new values
1252 * to the associated PHY and MAC. After performing the common tasks it
1253 * invokes an OS-specific handler.
1255 void t3_link_changed(adapter_t *adapter, int port_id)
1257 int link_ok, speed, duplex, fc;
1258 struct port_info *pi = adap2pinfo(adapter, port_id);
1259 struct cphy *phy = &pi->phy;
1260 struct cmac *mac = &pi->mac;
1261 struct link_config *lc = &pi->link_config;
/* Ask the PHY for the current link state and negotiated parameters. */
1263 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
/* Resolve flow control: use the autoneg outcome only if PAUSE_AUTONEG
 * was requested, otherwise force the requested RX/TX pause setting. */
1265 if (lc->requested_fc & PAUSE_AUTONEG)
1266 fc &= lc->requested_fc;
1268 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1270 if (link_ok == lc->link_ok && speed == lc->speed &&
1271 duplex == lc->duplex && fc == lc->fc)
1272 return; /* nothing changed */
/* On rev > 0 XAUI parts, gate the XAUI Tx/Rx enables on link state. */
1274 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1275 uses_xaui(adapter)) {
1278 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1279 link_ok ? F_TXACTENABLE | F_RXEN : 0);
/* Record the new state; negative speed/duplex mean "unknown". */
1281 lc->link_ok = (unsigned char)link_ok;
1282 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1283 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1285 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1286 /* Set MAC speed, duplex, and flow control to match PHY. */
1287 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1288 lc->fc = (unsigned char)fc;
/* Hand off to the OS-specific link-change notification. */
1291 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1295 * t3_link_start - apply link configuration to MAC/PHY
1296 * @phy: the PHY to setup
1297 * @mac: the MAC to setup
1298 * @lc: the requested link configuration
1300 * Set up a port's MAC and PHY according to a desired link configuration.
1301 * - If the PHY can auto-negotiate first decide what to advertise, then
1302 * enable/disable auto-negotiation as desired, and reset.
1303 * - If the PHY does not auto-negotiate just reset it.
1304 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1305 * otherwise do it later based on the outcome of auto-negotiation.
1307 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1309 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg-capable PHY: rebuild the pause advertisement bits from the
 * requested flow-control settings before (re)advertising. */
1312 if (lc->supported & SUPPORTED_Autoneg) {
1313 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1315 lc->advertising |= ADVERTISED_Asym_Pause;
1317 lc->advertising |= ADVERTISED_Pause;
1319 phy->ops->advertise(phy, lc->advertising);
/* Forced mode: program MAC and PHY to the fixed speed/duplex/FC. */
1321 if (lc->autoneg == AUTONEG_DISABLE) {
1322 lc->speed = lc->requested_speed;
1323 lc->duplex = lc->requested_duplex;
1324 lc->fc = (unsigned char)fc;
1325 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1327 /* Also disables autoneg */
1328 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1330 phy->ops->autoneg_enable(phy);
/* Non-autoneg PHY: set MAC FC only (-1 leaves speed/duplex alone)
 * and reset the PHY. */
1332 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1333 lc->fc = (unsigned char)fc;
1334 phy->ops->reset(phy, 0);
1340 * t3_set_vlan_accel - control HW VLAN extraction
1341 * @adapter: the adapter
1342 * @ports: bitmap of adapter ports to operate on
1343 * @on: enable (1) or disable (0) HW VLAN extraction
1345 * Enables or disables HW extraction of VLAN tags for the given port.
1347 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
/* Read-modify-write only the VLAN-extraction enable bits for the
 * selected ports, leaving the rest of TP_OUT_CONFIG untouched. */
1349 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1350 ports << S_VLANEXTRACTIONENABLE,
1351 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1355 unsigned int mask; /* bits to check in interrupt status */
1356 const char *msg; /* message to print or NULL */
1357 short stat_idx; /* stat counter to increment or -1 */
1358 unsigned short fatal; /* whether the condition reported is fatal */
1362 * t3_handle_intr_status - table driven interrupt handler
1363 * @adapter: the adapter that generated the interrupt
1364 * @reg: the interrupt status register to process
1365 * @mask: a mask to apply to the interrupt status
1366 * @acts: table of interrupt actions
1367 * @stats: statistics counters tracking interrupt occurrences
1369 * A table driven interrupt handler that applies a set of masks to an
1370 * interrupt status word and performs the corresponding actions if the
1371 * interrupts described by the mask have occurred. The actions include
1372 * optionally printing a warning or alert message, and optionally
1373 * incrementing a stat counter. The table is terminated by an entry
1374 * specifying mask 0. Returns the number of fatal interrupt conditions.
1376 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1378 const struct intr_info *acts,
1379 unsigned long *stats)
/* Latch the cause bits we care about; the table below is terminated
 * by an entry with mask == 0. */
1382 unsigned int status = t3_read_reg(adapter, reg) & mask;
1384 for ( ; acts->mask; ++acts) {
1385 if (!(status & acts->mask)) continue;
1388 CH_ALERT(adapter, "%s (0x%x)\n",
1389 acts->msg, status & acts->mask);
1390 } else if (acts->msg)
1391 CH_WARN(adapter, "%s (0x%x)\n",
1392 acts->msg, status & acts->mask);
/* stat_idx < 0 means "no counter for this condition". */
1393 if (acts->stat_idx >= 0)
1394 stats[acts->stat_idx]++;
1396 if (status) /* clear processed interrupts */
1397 t3_write_reg(adapter, reg, status);
1401 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1402 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1403 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1404 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1405 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1406 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1408 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1409 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1411 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1412 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1413 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1414 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1415 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1416 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1417 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1418 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1419 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1420 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1421 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1422 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1423 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1424 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1425 F_TXPARERR | V_BISTERR(M_BISTERR))
1426 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1427 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1428 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1429 #define ULPTX_INTR_MASK 0xfc
1430 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1431 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1432 F_ZERO_SWITCH_ERROR)
1433 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1434 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1435 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1436 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1437 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1438 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1439 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1440 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1441 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1442 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1443 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1444 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1445 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1446 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1447 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1448 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1449 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1450 V_MCAPARERRENB(M_MCAPARERRENB))
1451 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1452 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1453 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1454 F_MPS0 | F_CPL_SWITCH)
1457 * Interrupt handler for the PCIX1 module.
1459 static void pci_intr_handler(adapter_t *adapter)
/* Table of PCI-X cause bits; fatal == 1 entries escalate to t3_fatal_err. */
1461 static struct intr_info pcix1_intr_info[] = {
1462 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1463 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1464 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1465 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1466 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1467 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1468 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1469 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1470 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
/* Correctable ECC errors are counted but not fatal. */
1472 { F_DETCORECCERR, "PCI correctable ECC error",
1473 STAT_PCI_CORR_ECC, 0 },
1474 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1475 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1476 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1478 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1480 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1482 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1487 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1488 pcix1_intr_info, adapter->irq_stats))
1489 t3_fatal_err(adapter);
1493 * Interrupt handler for the PCIE module.
1495 static void pcie_intr_handler(adapter_t *adapter)
/* Table of PCIe cause bits; all visible entries are fatal. */
1497 static struct intr_info pcie_intr_info[] = {
1498 { F_PEXERR, "PCI PEX error", -1, 1 },
1500 "PCI unexpected split completion DMA read error", -1, 1 },
1502 "PCI unexpected split completion DMA command error", -1, 1 },
1503 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1504 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1505 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1506 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1507 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1508 "PCI MSI-X table/PBA parity error", -1, 1 },
1509 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1510 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1511 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1512 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1513 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
/* On a PEX error, log the detailed error code before processing. */
1517 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1518 CH_ALERT(adapter, "PEX error code 0x%x\n",
1519 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1521 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1522 pcie_intr_info, adapter->irq_stats))
1523 t3_fatal_err(adapter);
1527 * TP interrupt handler.
1529 static void tp_intr_handler(adapter_t *adapter)
/* Pre-T3C parts use raw bit masks for the TP cause register ... */
1531 static struct intr_info tp_intr_info[] = {
1532 { 0xffffff, "TP parity error", -1, 1 },
1533 { 0x1000000, "TP out of Rx pages", -1, 1 },
1534 { 0x2000000, "TP out of Tx pages", -1, 1 },
/* ... while T3C and later have named fields and a wider parity mask. */
1537 static struct intr_info tp_intr_info_t3c[] = {
1538 { 0x1fffffff, "TP parity error", -1, 1 },
1539 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1540 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
/* Select the table matching the chip revision. */
1544 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1545 adapter->params.rev < T3_REV_C ?
1546 tp_intr_info : tp_intr_info_t3c, NULL))
1547 t3_fatal_err(adapter);
1551 * CIM interrupt handler.
1553 static void cim_intr_handler(adapter_t *adapter)
/* All CIM conditions in this table are treated as fatal. */
1555 static struct intr_info cim_intr_info[] = {
1556 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1557 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1558 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1559 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1560 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1561 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1562 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1563 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1564 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1565 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1566 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1567 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1568 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1569 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1570 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1571 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1572 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1573 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1574 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1575 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1576 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1577 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1578 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1579 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1583 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1584 cim_intr_info, NULL))
1585 t3_fatal_err(adapter);
1589 * ULP RX interrupt handler.
1591 static void ulprx_intr_handler(adapter_t *adapter)
/* All ULP RX parity/framing conditions are fatal. */
1593 static struct intr_info ulprx_intr_info[] = {
1594 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1595 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1596 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1597 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1598 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1599 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1600 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1601 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1605 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1606 ulprx_intr_info, NULL))
1607 t3_fatal_err(adapter);
1611 * ULP TX interrupt handler.
1613 static void ulptx_intr_handler(adapter_t *adapter)
/* PBL out-of-bounds conditions are counted (non-fatal); parity is fatal. */
1615 static struct intr_info ulptx_intr_info[] = {
1616 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1617 STAT_ULP_CH0_PBL_OOB, 0 },
1618 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1619 STAT_ULP_CH1_PBL_OOB, 0 },
1620 { 0xfc, "ULP TX parity error", -1, 1 },
1624 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1625 ulptx_intr_info, adapter->irq_stats))
1626 t3_fatal_err(adapter);
1629 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1630 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1631 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1632 F_ICSPI1_TX_FRAMING_ERROR)
1633 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1634 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1635 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1636 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1639 * PM TX interrupt handler.
1641 static void pmtx_intr_handler(adapter_t *adapter)
/* PM TX framing/parity conditions; all fatal. */
1643 static struct intr_info pmtx_intr_info[] = {
1644 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1645 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1646 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1647 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1648 "PMTX ispi parity error", -1, 1 },
1649 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1650 "PMTX ospi parity error", -1, 1 },
1654 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1655 pmtx_intr_info, NULL))
1656 t3_fatal_err(adapter);
1659 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1660 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1661 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1662 F_IESPI1_TX_FRAMING_ERROR)
1663 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1664 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1665 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1666 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1669 * PM RX interrupt handler.
1671 static void pmrx_intr_handler(adapter_t *adapter)
/* PM RX framing/parity conditions; all fatal. */
1673 static struct intr_info pmrx_intr_info[] = {
1674 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1675 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1676 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1677 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1678 "PMRX ispi parity error", -1, 1 },
1679 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1680 "PMRX ospi parity error", -1, 1 },
1684 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1685 pmrx_intr_info, NULL))
1686 t3_fatal_err(adapter);
1690 * CPL switch interrupt handler.
1692 static void cplsw_intr_handler(adapter_t *adapter)
/* CPL switch error conditions; all fatal. */
1694 static struct intr_info cplsw_intr_info[] = {
1695 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1696 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1697 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1698 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1699 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1700 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1704 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1705 cplsw_intr_info, NULL))
1706 t3_fatal_err(adapter);
1710 * MPS interrupt handler.
1712 static void mps_intr_handler(adapter_t *adapter)
/* Any MPS parity error bit (0x1ff) is fatal. */
1714 static struct intr_info mps_intr_info[] = {
1715 { 0x1ff, "MPS parity error", -1, 1 },
1719 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1720 mps_intr_info, NULL))
1721 t3_fatal_err(adapter);
1724 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1727 * MC7 interrupt handler.
1729 static void mc7_intr_handler(struct mc7 *mc7)
1731 adapter_t *adapter = mc7->adapter;
1732 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable error: count it and log address + data words. */
1735 mc7->stats.corr_err++;
1736 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1737 "data 0x%x 0x%x 0x%x\n", mc7->name,
1738 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1739 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1740 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1741 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable error: count it and log address + data words. */
1745 mc7->stats.uncorr_err++;
1746 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1747 "data 0x%x 0x%x 0x%x\n", mc7->name,
1748 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1749 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1750 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1751 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity error: G_PE() extracts the parity-error field from cause. */
1755 mc7->stats.parity_err++;
1756 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1757 mc7->name, G_PE(cause));
/* Address error: the error-address register only exists on rev > 0. */
1763 if (adapter->params.rev > 0)
1764 addr = t3_read_reg(adapter,
1765 mc7->offset + A_MC7_ERR_ADDR);
1766 mc7->stats.addr_err++;
1767 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
/* Escalate fatal conditions, then acknowledge the cause bits. */
1771 if (cause & MC7_INTR_FATAL)
1772 t3_fatal_err(adapter);
1774 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1777 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1778 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1780 * XGMAC interrupt handler.
1782 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
/* Map MAC index to port index: MAC 1 starts after the nports0 ports. */
1787 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1788 mac = &adap2pinfo(adap, idx)->mac;
1789 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
/* FIFO parity errors are alerted; the rest are only counted. */
1791 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1792 mac->stats.tx_fifo_parity_err++;
1793 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1795 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1796 mac->stats.rx_fifo_parity_err++;
1797 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1799 if (cause & F_TXFIFO_UNDERRUN)
1800 mac->stats.tx_fifo_urun++;
1801 if (cause & F_RXFIFO_OVERFLOW)
1802 mac->stats.rx_fifo_ovfl++;
1803 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1804 mac->stats.serdes_signal_loss++;
1805 if (cause & F_XAUIPCSCTCERR)
1806 mac->stats.xaui_pcs_ctc_err++;
1807 if (cause & F_XAUIPCSALIGNCHANGE)
1808 mac->stats.xaui_pcs_align_change++;
/* Acknowledge the cause bits, then check for fatal conditions. */
1810 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1811 if (cause & XGM_INTR_FATAL)
1817 * Interrupt handler for PHY events.
1819 int t3_phy_intr_handler(adapter_t *adapter)
1821 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1823 for_each_port(adapter, i) {
1824 struct port_info *p = adap2pinfo(adapter, i);
/* Skip ports whose PHY does not support interrupts. */
1826 if (!(p->phy.caps & SUPPORTED_IRQ))
/* Each port's PHY interrupt arrives on its assigned GPIO pin. */
1829 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1830 int phy_cause = p->phy.ops->intr_handler(&p->phy);
/* Fan out the PHY-reported causes to the relevant handlers. */
1832 if (phy_cause & cphy_cause_link_change)
1833 t3_link_changed(adapter, i);
1834 if (phy_cause & cphy_cause_fifo_error)
1835 p->phy.fifo_errors++;
1836 if (phy_cause & cphy_cause_module_change)
1837 t3_os_phymod_changed(adapter, i);
/* Acknowledge all processed GPIO cause bits. */
1841 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1846 * t3_slow_intr_handler - control path interrupt handler
1847 * @adapter: the adapter
1849 * T3 interrupt handler for non-data interrupt events, e.g., errors.
1850 * The designation 'slow' is because it involves register reads, while
1851 * data interrupts typically don't involve any MMIOs.
1853 int t3_slow_intr_handler(adapter_t *adapter)
1855 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Only service interrupts that are currently enabled. */
1857 cause &= adapter->slow_intr_mask;
/* Dispatch each top-level cause bit to its module handler. */
1860 if (cause & F_PCIM0) {
1861 if (is_pcie(adapter))
1862 pcie_intr_handler(adapter);
1864 pci_intr_handler(adapter);
1867 t3_sge_err_intr_handler(adapter);
1868 if (cause & F_MC7_PMRX)
1869 mc7_intr_handler(&adapter->pmrx);
1870 if (cause & F_MC7_PMTX)
1871 mc7_intr_handler(&adapter->pmtx);
1872 if (cause & F_MC7_CM)
1873 mc7_intr_handler(&adapter->cm);
1875 cim_intr_handler(adapter);
1877 tp_intr_handler(adapter);
1878 if (cause & F_ULP2_RX)
1879 ulprx_intr_handler(adapter);
1880 if (cause & F_ULP2_TX)
1881 ulptx_intr_handler(adapter);
1882 if (cause & F_PM1_RX)
1883 pmrx_intr_handler(adapter);
1884 if (cause & F_PM1_TX)
1885 pmtx_intr_handler(adapter);
1886 if (cause & F_CPL_SWITCH)
1887 cplsw_intr_handler(adapter);
1889 mps_intr_handler(adapter);
1891 t3_mc5_intr_handler(&adapter->mc5);
1892 if (cause & F_XGMAC0_0)
1893 mac_intr_handler(adapter, 0);
1894 if (cause & F_XGMAC0_1)
1895 mac_intr_handler(adapter, 1);
1896 if (cause & F_T3DBG)
1897 t3_os_ext_intr_handler(adapter);
1899 /* Clear the interrupts just processed. */
1900 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1901 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/* Build the GPIO interrupt-enable mask: one bit per port whose PHY
 * supports interrupts and has a GPIO interrupt line assigned. */
1905 static unsigned int calc_gpio_intr(adapter_t *adap)
1907 unsigned int i, gpi_intr = 0;
1909 for_each_port(adap, i)
1910 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1911 adapter_info(adap)->gpio_intr[i])
1912 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1917 * t3_intr_enable - enable interrupts
1918 * @adapter: the adapter whose interrupts should be enabled
1920 * Enable interrupts by setting the interrupt enable registers of the
1921 * various HW modules and then enabling the top-level interrupt
1924 void t3_intr_enable(adapter_t *adapter)
/* Per-module interrupt-enable registers that can be written in bulk.
 * The PMTX/CM MC7 enables are derived from the PMRX base address. */
1926 static struct addr_val_pair intr_en_avp[] = {
1927 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1928 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1930 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1932 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1933 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1934 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1935 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1936 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1937 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1940 adapter->slow_intr_mask = PL_INTR_MASK;
1942 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* TP and SGE enables depend on chip revision. */
1943 t3_write_reg(adapter, A_TP_INT_ENABLE,
1944 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1945 t3_write_reg(adapter, A_SG_INT_ENABLE,
1946 adapter->params.rev >= T3_REV_C ?
1947 SGE_INTR_MASK | F_FLEMPTY : SGE_INTR_MASK);
/* Rev > 0 parts gain extra CPL/ULPTX error conditions. */
1949 if (adapter->params.rev > 0) {
1950 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1951 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1952 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1953 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1954 F_PBL_BOUND_ERR_CH1);
1956 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1957 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
/* GPIO (PHY) interrupts, then the bus-specific enable. */
1960 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1962 if (is_pcie(adapter))
1963 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1965 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the write. */
1966 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1967 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1971 * t3_intr_disable - disable a card's interrupts
1972 * @adapter: the adapter whose interrupts should be disabled
1974 * Disable interrupts. We only disable the top-level interrupt
1975 * concentrator and the SGE data interrupts.
1977 void t3_intr_disable(adapter_t *adapter)
/* Close only the top-level concentrator; per-module enables are kept. */
1979 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1980 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1981 adapter->slow_intr_mask = 0;
1985 * t3_intr_clear - clear all interrupts
1986 * @adapter: the adapter whose interrupts should be cleared
1988 * Clears all interrupts.
1990 void t3_intr_clear(adapter_t *adapter)
/* Cause registers to blanket-clear with all-ones writes. */
1992 static const unsigned int cause_reg_addr[] = {
1994 A_SG_RSPQ_FL_STATUS,
1997 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1998 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1999 A_CIM_HOST_INT_CAUSE,
2012 /* Clear PHY and MAC interrupts for each port. */
2013 for_each_port(adapter, i)
2014 t3_port_intr_clear(adapter, i);
2016 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2017 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
/* PCIe parts also latch PEX errors in a separate register. */
2019 if (is_pcie(adapter))
2020 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2021 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2022 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2026 * t3_port_intr_enable - enable port-specific interrupts
2027 * @adapter: associated adapter
2028 * @idx: index of port whose interrupts should be enabled
2030 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2033 void t3_port_intr_enable(adapter_t *adapter, int idx)
2035 struct port_info *pi = adap2pinfo(adapter, idx);
/* Enable the port's MAC interrupt sources, then its PHY's. */
2037 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2038 pi->phy.ops->intr_enable(&pi->phy);
2042 * t3_port_intr_disable - disable port-specific interrupts
2043 * @adapter: associated adapter
2044 * @idx: index of port whose interrupts should be disabled
2046 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2049 void t3_port_intr_disable(adapter_t *adapter, int idx)
2051 struct port_info *pi = adap2pinfo(adapter, idx);
/* Mask the port's MAC interrupt sources, then its PHY's. */
2053 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2054 pi->phy.ops->intr_disable(&pi->phy);
2058 * t3_port_intr_clear - clear port-specific interrupts
2059 * @adapter: associated adapter
2060 * @idx: index of port whose interrupts to clear
2062 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2065 void t3_port_intr_clear(adapter_t *adapter, int idx)
2067 struct port_info *pi = adap2pinfo(adapter, idx);
/* Ack all pending MAC cause bits, then clear the PHY's. */
2069 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2070 pi->phy.ops->intr_clear(&pi->phy);
2073 #define SG_CONTEXT_CMD_ATTEMPTS 100
2076 * t3_sge_write_context - write an SGE context
2077 * @adapter: the adapter
2078 * @id: the context id
2079 * @type: the context type
2081 * Program an SGE context with the values already loaded in the
2082 * CONTEXT_DATA? registers.
2084 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
/* Write all context words: set full masks, issue the write command,
 * then wait for CONTEXT_CMD_BUSY to clear. */
2087 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2088 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2089 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2090 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2091 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2092 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2093 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2094 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/* Zero all four data words and commit: writes an all-zero context. */
2097 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2099 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2100 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2101 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2102 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2103 return t3_sge_write_context(adap, id, type);
2107 * t3_sge_init_ecntxt - initialize an SGE egress context
2108 * @adapter: the adapter to configure
2109 * @id: the context id
2110 * @gts_enable: whether to enable GTS for the context
2111 * @type: the egress context type
2112 * @respq: associated response queue
2113 * @base_addr: base address of queue
2114 * @size: number of queue entries
2116 * @gen: initial generation value for the context
2117 * @cidx: consumer pointer
2119 * Initialize an SGE egress context and make it ready for use. If the
2120 * platform allows concurrent context operations, the caller is
2121 * responsible for appropriate locking.
2123 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2124 enum sge_context_type type, int respq, u64 base_addr,
2125 unsigned int size, unsigned int token, int gen,
/* Offload contexts start without credits; others get FW_WR_NUM. */
2128 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2130 if (base_addr & 0xfff) /* must be 4K aligned */
2132 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the egress context into the four CONTEXT_DATA words; the 64-bit
 * base address is split across words 1-3. */
2136 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2137 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2138 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2139 V_EC_BASE_LO((u32)base_addr & 0xffff));
2141 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2143 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2144 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2145 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2147 return t3_sge_write_context(adapter, id, F_EGRESS);
2151 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2152 * @adapter: the adapter to configure
2153 * @id: the context id
2154 * @gts_enable: whether to enable GTS for the context
2155 * @base_addr: base address of queue
2156 * @size: number of queue entries
2157 * @bsize: size of each buffer for this queue
2158 * @cong_thres: threshold to signal congestion to upstream producers
2159 * @gen: initial generation value for the context
2160 * @cidx: consumer pointer
2162 * Initialize an SGE free list context and make it ready for use. The
2163 * caller is responsible for ensuring only one context operation occurs
2166 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2167 u64 base_addr, unsigned int size, unsigned int bsize,
2168 unsigned int cong_thres, int gen, unsigned int cidx)
2170 if (base_addr & 0xfff) /* must be 4K aligned */
2172 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the free-list context; the base address, consumer index and
 * buffer size are each split across two CONTEXT_DATA words. */
2176 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2178 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2179 V_FL_BASE_HI((u32)base_addr) |
2180 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2181 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2182 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2183 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2184 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2185 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2186 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2187 return t3_sge_write_context(adapter, id, F_FREELIST);
2191 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2192 * @adapter: the adapter to configure
2193 * @id: the context id
2194 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2195 * @base_addr: base address of queue
2196 * @size: number of queue entries
2197 * @fl_thres: threshold for selecting the normal or jumbo free list
2198 * @gen: initial generation value for the context
2199 * @cidx: consumer pointer
2201 * Initialize an SGE response queue context and make it ready for use.
2202 * The caller is responsible for ensuring only one context operation
2205 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2206 u64 base_addr, unsigned int size,
2207 unsigned int fl_thres, int gen, unsigned int cidx)
2209 unsigned int intr = 0;
2211 if (base_addr & 0xfff) /* must be 4K aligned */
2213 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2217 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2219 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
/* irq_vec_idx < 0 means no interrupt for this queue. */
2221 if (irq_vec_idx >= 0)
2222 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2223 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2224 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2225 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2226 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2230 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2231 * @adapter: the adapter to configure
2232 * @id: the context id
2233 * @base_addr: base address of queue
2234 * @size: number of queue entries
2235 * @rspq: response queue for async notifications
2236 * @ovfl_mode: CQ overflow mode
2237 * @credits: completion queue credits
2238 * @credit_thres: the credit threshold
2240 * Initialize an SGE completion queue context and make it ready for use.
2241 * The caller is responsible for ensuring only one context operation
2244 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2245 unsigned int size, int rspq, int ovfl_mode,
2246 unsigned int credits, unsigned int credit_thres)
2248 if (base_addr & 0xfff) /* must be 4K aligned */
2250 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2254 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2255 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2257 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2258 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2259 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2260 V_CQ_ERR(ovfl_mode));
2261 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2262 V_CQ_CREDIT_THRES(credit_thres));
2263 return t3_sge_write_context(adapter, id, F_CQ);
2267 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2268 * @adapter: the adapter
2269 * @id: the egress context id
2270 * @enable: enable (1) or disable (0) the context
2272 * Enable or disable an SGE egress context. The caller is responsible for
2273 * ensuring only one context operation occurs at a time.
2275 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2277 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2280 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2281 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2282 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2283 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2284 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2285 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2286 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2287 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2288 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2292 * t3_sge_disable_fl - disable an SGE free-buffer list
2293 * @adapter: the adapter
2294 * @id: the free list context id
2296 * Disable an SGE free-buffer list. The caller is responsible for
2297 * ensuring only one context operation occurs at a time.
2299 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2301 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2304 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2305 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2306 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2307 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2308 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2309 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2310 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2311 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2312 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2316 * t3_sge_disable_rspcntxt - disable an SGE response queue
2317 * @adapter: the adapter
2318 * @id: the response queue context id
2320 * Disable an SGE response queue. The caller is responsible for
2321 * ensuring only one context operation occurs at a time.
2323 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2325 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2328 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2329 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2330 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2331 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2332 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2333 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2334 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2335 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2336 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2340 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2341 * @adapter: the adapter
2342 * @id: the completion queue context id
2344 * Disable an SGE completion queue. The caller is responsible for
2345 * ensuring only one context operation occurs at a time.
2347 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2349 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2352 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2353 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2354 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2355 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2356 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2357 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2358 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2359 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2360 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2364 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2365 * @adapter: the adapter
2366 * @id: the context id
2367 * @op: the operation to perform
2368 * @credits: credits to return to the CQ
2370 * Perform the selected operation on an SGE completion queue context.
2371 * The caller is responsible for ensuring only one context operation
2374 * For most operations the function returns the current HW position in
2375 * the completion queue.
2377 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2378 unsigned int credits)
2382 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2385 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2386 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2387 V_CONTEXT(id) | F_CQ);
2388 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2389 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2392 if (op >= 2 && op < 7) {
2393 if (adapter->params.rev > 0)
2394 return G_CQ_INDEX(val);
2396 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2397 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2398 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2399 F_CONTEXT_CMD_BUSY, 0,
2400 SG_CONTEXT_CMD_ATTEMPTS, 1))
2402 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2408 * t3_sge_read_context - read an SGE context
2409 * @type: the context type
2410 * @adapter: the adapter
2411 * @id: the context id
2412 * @data: holds the retrieved context
2414 * Read an SGE egress context. The caller is responsible for ensuring
2415 * only one context operation occurs at a time.
2417 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2418 unsigned int id, u32 data[4])
2420 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2423 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2424 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2425 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2426 SG_CONTEXT_CMD_ATTEMPTS, 1))
2428 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2429 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2430 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2431 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2436 * t3_sge_read_ecntxt - read an SGE egress context
2437 * @adapter: the adapter
2438 * @id: the context id
2439 * @data: holds the retrieved context
2441 * Read an SGE egress context. The caller is responsible for ensuring
2442 * only one context operation occurs at a time.
2444 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2448 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2452 * t3_sge_read_cq - read an SGE CQ context
2453 * @adapter: the adapter
2454 * @id: the context id
2455 * @data: holds the retrieved context
2457 * Read an SGE CQ context. The caller is responsible for ensuring
2458 * only one context operation occurs at a time.
2460 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2464 return t3_sge_read_context(F_CQ, adapter, id, data);
2468 * t3_sge_read_fl - read an SGE free-list context
2469 * @adapter: the adapter
2470 * @id: the context id
2471 * @data: holds the retrieved context
2473 * Read an SGE free-list context. The caller is responsible for ensuring
2474 * only one context operation occurs at a time.
2476 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2478 if (id >= SGE_QSETS * 2)
2480 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2484 * t3_sge_read_rspq - read an SGE response queue context
2485 * @adapter: the adapter
2486 * @id: the context id
2487 * @data: holds the retrieved context
2489 * Read an SGE response queue context. The caller is responsible for
2490 * ensuring only one context operation occurs at a time.
2492 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2494 if (id >= SGE_QSETS)
2496 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2500 * t3_config_rss - configure Rx packet steering
2501 * @adapter: the adapter
2502 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2503 * @cpus: values for the CPU lookup table (0xff terminated)
2504 * @rspq: values for the response queue lookup table (0xffff terminated)
2506 * Programs the receive packet steering logic. @cpus and @rspq provide
2507 * the values for the CPU and response queue lookup tables. If they
2508 * provide fewer values than the size of the tables the supplied values
2509 * are used repeatedly until the tables are fully populated.
2511 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2514 int i, j, cpu_idx = 0, q_idx = 0;
2517 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2520 for (j = 0; j < 2; ++j) {
2521 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2522 if (cpus[cpu_idx] == 0xff)
2525 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2529 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2530 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2531 (i << 16) | rspq[q_idx++]);
2532 if (rspq[q_idx] == 0xffff)
2536 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2540 * t3_read_rss - read the contents of the RSS tables
2541 * @adapter: the adapter
2542 * @lkup: holds the contents of the RSS lookup table
2543 * @map: holds the contents of the RSS map table
2545 * Reads the contents of the receive packet steering tables.
2547 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2553 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2554 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2556 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2557 if (!(val & 0x80000000))
2560 *lkup++ = (u8)(val >> 8);
2564 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2565 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2567 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2568 if (!(val & 0x80000000))
2576 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2577 * @adap: the adapter
2578 * @enable: 1 to select offload mode, 0 for regular NIC
2580 * Switches TP to NIC/offload mode.
2582 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2584 if (is_offload(adap) || !enable)
2585 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2586 V_NICMODE(!enable));
2590 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2591 * @adap: the adapter
2592 * @addr: the indirect TP register address
2593 * @mask: specifies the field within the register to modify
2594 * @val: new value for the field
2596 * Sets a field of an indirect TP register to the given value.
2598 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2599 unsigned int mask, unsigned int val)
2601 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2602 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2603 t3_write_reg(adap, A_TP_PIO_DATA, val);
2607 * t3_enable_filters - enable the HW filters
2608 * @adap: the adapter
2610 * Enables the HW filters for NIC traffic.
2612 void t3_enable_filters(adapter_t *adap)
2614 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
2615 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
2616 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
2617 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;	/* round down to a multiple of 24 */
}
/* Program a memory-region base register and advance the running offset. */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
2642 * partition_mem - partition memory and configure TP memory settings
2643 * @adap: the adapter
2644 * @p: the TP parameters
2646 * Partitions context and payload memory and configures TP's memory
2649 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2651 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2652 unsigned int timers = 0, timers_shift = 22;
2654 if (adap->params.rev > 0) {
2655 if (tids <= 16 * 1024) {
2658 } else if (tids <= 64 * 1024) {
2661 } else if (tids <= 256 * 1024) {
2667 t3_write_reg(adap, A_TP_PMM_SIZE,
2668 p->chan_rx_size | (p->chan_tx_size >> 16));
2670 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2671 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2672 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2673 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2674 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2676 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2677 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2678 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2680 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2681 /* Add a bit of headroom and make multiple of 24 */
2683 pstructs -= pstructs % 24;
2684 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2686 m = tids * TCB_SIZE;
2687 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2688 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2689 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2690 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2691 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2692 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2693 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2694 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2696 m = (m + 4095) & ~0xfff;
2697 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2698 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2700 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2701 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2702 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2704 adap->params.mc5.nservers += m - tids;
2707 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2709 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2710 t3_write_reg(adap, A_TP_PIO_DATA, val);
2713 static void tp_config(adapter_t *adap, const struct tp_params *p)
2715 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2716 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2717 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2718 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2719 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2720 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2721 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2722 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2723 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2724 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2725 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2726 F_IPV6ENABLE | F_NICMODE);
2727 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2728 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2729 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2730 adap->params.rev > 0 ? F_ENABLEESND :
2732 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2734 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2735 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2736 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2737 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2738 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2739 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2740 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2742 if (adap->params.rev > 0) {
2743 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2744 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
2745 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
2746 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2747 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
2748 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
2749 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
2751 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2753 if (adap->params.rev == T3_REV_C)
2754 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2755 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2756 V_TABLELATENCYDELTA(4));
2758 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2759 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2760 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2761 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2763 if (adap->params.nports > 2) {
2764 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2765 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
2766 F_ENABLERXPORTFROMADDR);
2767 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2768 V_RXMAPMODE(M_RXMAPMODE), 0);
2769 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2770 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2771 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2772 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2773 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2774 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2775 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
/* TCP timer values in ms */
#define TP_DACK_TIMER 50	/* delayed ACK timer */
#define TP_RTO_MIN    250	/* minimum retransmit timeout */
2784 * tp_set_timers - set TP timing parameters
2785 * @adap: the adapter to set
2786 * @core_clk: the core clock frequency in Hz
2788 * Set TP's timing parameters, such as the various timer resolutions and
2789 * the TCP timer values.
2791 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2793 unsigned int tre = adap->params.tp.tre;
2794 unsigned int dack_re = adap->params.tp.dack_re;
2795 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2796 unsigned int tps = core_clk >> tre;
2798 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2799 V_DELAYEDACKRESOLUTION(dack_re) |
2800 V_TIMESTAMPRESOLUTION(tstamp_re));
2801 t3_write_reg(adap, A_TP_DACK_TIMER,
2802 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2803 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2804 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2805 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2806 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2807 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2808 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2809 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2812 #define SECONDS * tps
2814 t3_write_reg(adap, A_TP_MSL,
2815 adap->params.rev > 0 ? 0 : 2 SECONDS);
2816 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2817 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2818 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2819 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2820 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2821 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2822 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2823 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2828 #ifdef CONFIG_CHELSIO_T3_CORE
2830 * t3_tp_set_coalescing_size - set receive coalescing size
2831 * @adap: the adapter
2832 * @size: the receive coalescing size
2833 * @psh: whether a set PSH bit should deliver coalesced data
2835 * Set the receive coalescing size and PSH bit handling.
2837 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2841 if (size > MAX_RX_COALESCING_LEN)
2844 val = t3_read_reg(adap, A_TP_PARA_REG3);
2845 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2848 val |= F_RXCOALESCEENABLE;
2850 val |= F_RXCOALESCEPSHEN;
2851 size = min(MAX_RX_COALESCING_LEN, size);
2852 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2853 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2855 t3_write_reg(adap, A_TP_PARA_REG3, val);
2860 * t3_tp_set_max_rxsize - set the max receive size
2861 * @adap: the adapter
2862 * @size: the max receive size
2864 * Set TP's max receive size. This is the limit that applies when
2865 * receive coalescing is disabled.
2867 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2869 t3_write_reg(adap, A_TP_PARA_REG7,
2870 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2873 static void __devinit init_mtus(unsigned short mtus[])
2876 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2877 * it can accomodate max size TCP/IP headers when SACK and timestamps
2878 * are enabled and still have at least 8 bytes of payload.
2899 * init_cong_ctrl - initialize congestion control parameters
2900 * @a: the alpha values for congestion control
2901 * @b: the beta values for congestion control
2903 * Initialize the congestion control parameters.
2905 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2907 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2932 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2935 b[13] = b[14] = b[15] = b[16] = 3;
2936 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2937 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2942 /* The minimum additive increment value for the congestion control table */
2943 #define CC_MIN_INCR 2U
2946 * t3_load_mtus - write the MTU and congestion control HW tables
2947 * @adap: the adapter
2948 * @mtus: the unrestricted values for the MTU table
2949 * @alpha: the values for the congestion control alpha parameter
2950 * @beta: the values for the congestion control beta parameter
2951 * @mtu_cap: the maximum permitted effective MTU
2953 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2954 * Update the high-speed congestion control table with the supplied alpha,
2957 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2958 unsigned short alpha[NCCTRL_WIN],
2959 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2961 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2962 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2963 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2964 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2968 for (i = 0; i < NMTUS; ++i) {
2969 unsigned int mtu = min(mtus[i], mtu_cap);
2970 unsigned int log2 = fls(mtu);
2972 if (!(mtu & ((1 << log2) >> 2))) /* round */
2974 t3_write_reg(adap, A_TP_MTU_TABLE,
2975 (i << 24) | (log2 << 16) | mtu);
2977 for (w = 0; w < NCCTRL_WIN; ++w) {
2980 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2983 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2984 (w << 16) | (beta[w] << 13) | inc);
2990 * t3_read_hw_mtus - returns the values in the HW MTU table
2991 * @adap: the adapter
2992 * @mtus: where to store the HW MTU values
2994 * Reads the HW MTU table.
2996 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3000 for (i = 0; i < NMTUS; ++i) {
3003 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3004 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3005 mtus[i] = val & 0x3fff;
3010 * t3_get_cong_cntl_tab - reads the congestion control table
3011 * @adap: the adapter
3012 * @incr: where to store the alpha values
3014 * Reads the additive increments programmed into the HW congestion
3017 void t3_get_cong_cntl_tab(adapter_t *adap,
3018 unsigned short incr[NMTUS][NCCTRL_WIN])
3020 unsigned int mtu, w;
3022 for (mtu = 0; mtu < NMTUS; ++mtu)
3023 for (w = 0; w < NCCTRL_WIN; ++w) {
3024 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3025 0xffff0000 | (mtu << 5) | w);
3026 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3027 A_TP_CCTRL_TABLE) & 0x1fff;
3032 * t3_tp_get_mib_stats - read TP's MIB counters
3033 * @adap: the adapter
3034 * @tps: holds the returned counter values
3036 * Returns the values of TP's MIB counters.
3038 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3040 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3041 sizeof(*tps) / sizeof(u32), 0);
3045 * t3_read_pace_tbl - read the pace table
3046 * @adap: the adapter
3047 * @pace_vals: holds the returned values
3049 * Returns the values of TP's pace table in nanoseconds.
3051 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3053 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3055 for (i = 0; i < NTX_SCHED; i++) {
3056 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3057 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3062 * t3_set_pace_tbl - set the pace table
3063 * @adap: the adapter
3064 * @pace_vals: the pace values in nanoseconds
3065 * @start: index of the first entry in the HW pace table to set
3066 * @n: how many entries to set
3068 * Sets (a subset of the) HW pace table.
3070 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3071 unsigned int start, unsigned int n)
3073 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3075 for ( ; n; n--, start++, pace_vals++)
3076 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3077 ((*pace_vals + tick_ns / 2) / tick_ns));
/* Program a ULP RX region's limits and advance the running offset. */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len)

/* Program a ULP TX region's limits (offset is not advanced). */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
3091 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3093 unsigned int m = p->chan_rx_size;
3095 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3096 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3097 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3098 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3099 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3100 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3101 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3102 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3107 * t3_set_proto_sram - set the contents of the protocol sram
3108 * @adapter: the adapter
3109 * @data: the protocol image
3111 * Write the contents of the protocol SRAM.
3113 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3116 const u32 *buf = (const u32 *)data;
3118 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3119 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3120 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3121 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3122 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3123 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3125 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3126 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3134 * t3_config_trace_filter - configure one of the tracing filters
3135 * @adapter: the adapter
3136 * @tp: the desired trace filter parameters
3137 * @filter_index: which filter to configure
3138 * @invert: if set non-matching packets are traced instead of matching ones
3139 * @enable: whether to enable or disable the filter
3141 * Configures one of the tracing filters available in HW.
3143 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3144 int filter_index, int invert, int enable)
3146 u32 addr, key[4], mask[4];
3148 key[0] = tp->sport | (tp->sip << 16);
3149 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3151 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3153 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3154 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3155 mask[2] = tp->dip_mask;
3156 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3159 key[3] |= (1 << 29);
3161 key[3] |= (1 << 28);
3163 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3164 tp_wr_indirect(adapter, addr++, key[0]);
3165 tp_wr_indirect(adapter, addr++, mask[0]);
3166 tp_wr_indirect(adapter, addr++, key[1]);
3167 tp_wr_indirect(adapter, addr++, mask[1]);
3168 tp_wr_indirect(adapter, addr++, key[2]);
3169 tp_wr_indirect(adapter, addr++, mask[2]);
3170 tp_wr_indirect(adapter, addr++, key[3]);
3171 tp_wr_indirect(adapter, addr, mask[3]);
3172 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3176 * t3_config_sched - configure a HW traffic scheduler
3177 * @adap: the adapter
3178 * @kbps: target rate in Kbps
3179 * @sched: the scheduler index
3181 * Configure a Tx HW scheduler for the target rate.
3183 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3185 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3186 unsigned int clk = adap->params.vpd.cclk * 1000;
3187 unsigned int selected_cpt = 0, selected_bpt = 0;
3190 kbps *= 125; /* -> bytes */
3191 for (cpt = 1; cpt <= 255; cpt++) {
3193 bpt = (kbps + tps / 2) / tps;
3194 if (bpt > 0 && bpt <= 255) {
3196 delta = v >= kbps ? v - kbps : kbps - v;
3197 if (delta < mindelta) {
3202 } else if (selected_cpt)
3208 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3209 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3210 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3212 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3214 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3215 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3220 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3221 * @adap: the adapter
3222 * @sched: the scheduler index
3223 * @ipg: the interpacket delay in tenths of nanoseconds
3225 * Set the interpacket delay for a HW packet rate scheduler.
3227 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3229 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3231 /* convert ipg to nearest number of core clocks */
3232 ipg *= core_ticks_per_usec(adap);
3233 ipg = (ipg + 5000) / 10000;
3237 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3238 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3240 v = (v & 0xffff) | (ipg << 16);
3242 v = (v & 0xffff0000) | ipg;
3243 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3244 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3249 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3250 * @adap: the adapter
3251 * @sched: the scheduler index
3252 * @kbps: the byte rate in Kbps
3253 * @ipg: the interpacket delay in tenths of nanoseconds
3255 * Return the current configuration of a HW Tx scheduler.
3257 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3260 unsigned int v, addr, bpt, cpt;
3263 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3264 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3265 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3268 bpt = (v >> 8) & 0xff;
3271 *kbps = 0; /* scheduler disabled */
3273 v = (adap->params.vpd.cclk * 1000) / cpt;
3274 *kbps = (v * bpt) / 125;
3278 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3279 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3280 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3284 *ipg = (10000 * v) / core_ticks_per_usec(adap);
/**
3289 * tp_init - configure TP
3290 * @adap: the adapter
3291 * @p: TP configuration parameters
3293 * Initializes the TP HW module.
 */
3295 static int tp_init(adapter_t *adap, const struct tp_params *p)
/* Accept VLAN-tagged frames on both ports (mask 3), no extraction. */
3300 t3_set_vlan_accel(adap, 3, 0);
3302 if (is_offload(adap)) {
/* Offload-capable cards also need TP timers and free-list init. */
3303 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3304 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
/* Wait for free-list initialization to complete before releasing TP reset. */
3305 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3308 CH_ERR(adap, "TP initialization timed out\n");
/* Take the TP block out of reset. */
3312 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/**
3317 * t3_mps_set_active_ports - configure port failover
3318 * @adap: the adapter
3319 * @port_mask: bitmap of active ports
3321 * Sets the active ports according to the supplied bitmap.
 */
3323 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
/* Reject any bit beyond the adapter's physical port count.
 * NOTE(review): the error-return line of this check appears truncated here. */
3325 if (port_mask & ~((1 << adap->params.nports) - 1))
/* Shift the mask into the PORTnACTIVE field positions of MPS_CFG. */
3327 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3328 port_mask << S_PORT0ACTIVE);
/**
3333 * chan_init_hw - channel-dependent HW initialization
3334 * @adap: the adapter
3335 * @chan_map: bitmap of Tx channels being used
3337 * Perform the bits of HW initialization that are dependent on the Tx
3338 * channels being used.
 */
3340 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3344 if (chan_map != 3) { /* one channel */
/* Single-channel: disable round-robin arbitration in ULP RX/TX. */
3345 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3346 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
/* Enable only the Tx port actually in use (channel 0 or channel 1). */
3347 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3348 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3349 F_TPTXPORT1EN | F_PORT1ACTIVE));
/* Give all PM1 Tx memory to whichever channel is active. */
3350 t3_write_reg(adap, A_PM1_TX_CFG,
3351 chan_map == 1 ? 0xffffffff : 0);
/* Map all modulation queues onto the single channel. */
3353 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3354 V_TX_MOD_QUEUE_REQ_MAP(0xff));
/* Magic queue-table weights for entries 12 and 13 (single-channel case). */
3355 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3356 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3357 } else { /* two channels */
/* Dual-channel: round-robin between channels with equal DMA weights. */
3358 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3359 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3360 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3361 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
/* Enable both Tx ports and mark both active. */
3362 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3363 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
/* Split PM1 Tx memory evenly between the two channels. */
3365 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3366 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
/* Alternate modulation queues between the two channels (0xaa pattern). */
3367 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3368 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3369 for (i = 0; i < 16; i++)
3370 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3371 (i << 16) | 0x1010);
/* Entries 12 and 13 get distinct weights in the dual-channel case. */
3372 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3373 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
/*
 * Calibrate the XGMAC pad impedance.  For XAUI cards, retry the HW
 * auto-calibration a few times; for RGMII, program fixed pull-up/pull-down
 * impedance values directly.  Returns 0 on success (return lines appear
 * truncated in this view).
 */
3377 static int calibrate_xgm(adapter_t *adapter)
3379 if (uses_xaui(adapter)) {
/* Up to 5 attempts at HW impedance calibration. */
3382 for (i = 0; i < 5; ++i) {
3383 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* Read back to flush the posted write before polling. */
3384 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3386 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
/* Success: neither fault nor busy — latch the calibrated impedance. */
3387 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3388 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3389 V_XAUIIMP(G_CALIMP(v) >> 2));
3393 CH_ERR(adapter, "MAC calibration failed\n");
/* RGMII path: fixed impedance values, then pulse the setup-update bit. */
3396 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3397 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3398 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3399 F_XGM_IMPSETUPDATE);
/*
 * T3B-specific XGMAC calibration for RGMII interfaces: pulse CALRESET,
 * then toggle IMPSETUPDATE and CALUPDATE to latch the fixed impedance
 * settings.  XAUI cards need no action here.
 */
3404 static void calibrate_xgm_t3b(adapter_t *adapter)
3406 if (!uses_xaui(adapter)) {
/* Assert calibration reset along with the fixed pull-up/down values. */
3407 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3408 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3409 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
/* Pulse IMPSETUPDATE: set then clear. */
3410 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3411 F_XGM_IMPSETUPDATE);
3412 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
/* Pulse CALUPDATE: clear then set. */
3414 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3415 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * DRAM timing parameters for the MC7 memory controllers, expressed in
 * controller clock cycles.  A table of these (one row per memory speed
 * grade) is consumed by mc7_init(), which packs the fields into the
 * MC7_PARM register.
 */
3419 struct mc7_timing_params {
3420 unsigned char ActToPreDly;     /* activate-to-precharge delay */
3421 unsigned char ActToRdWrDly;    /* activate-to-read/write delay */
3422 unsigned char PreCyc;          /* precharge cycle time */
3423 unsigned char RefCyc[5];       /* refresh cycle time, indexed by density */
3424 unsigned char BkCyc;           /* bank cycle time */
3425 unsigned char WrToRdDly;       /* write-to-read turnaround delay */
3426 unsigned char RdToWrDly;       /* read-to-write turnaround delay */
/*
3430 * Write a value to a register and check that the write completed. These
3431 * writes normally complete in a cycle or two, so one read should suffice.
3432 * The very first read exists to flush the posted write to the device.
 */
3434 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3436 t3_write_reg(adapter, addr, val);
3437 (void) t3_read_reg(adapter, addr); /* flush */
/* BUSY cleared means the write took effect; success path returns here. */
3438 if (!(t3_read_reg(adapter, addr) & F_BUSY))
/* Still busy after the flush read: report a timeout.
 * NOTE(review): the return statements appear truncated in this view. */
3440 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Initialize an MC7 memory controller and the DRAM attached to it:
 * impedance calibration, timing programming, the JEDEC-style DRAM
 * mode-register init sequence, refresh setup, ECC enable, and a BIST
 * pass over the full memory, before finally enabling normal accesses.
 * @mc7_clock is the controller clock in KHz; @mem_type indexes the
 * timing and mode tables below.  Returns 0 on success (error-path lines
 * appear truncated in this view).
 */
3444 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* DRAM mode-register values, one per supported memory type. */
3446 static const unsigned int mc7_mode[] = {
3447 0x632, 0x642, 0x652, 0x432, 0x442
/* Timing rows parallel mc7_mode[]; RefCyc[] is indexed by density. */
3449 static const struct mc7_timing_params mc7_timings[] = {
3450 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3451 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3452 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3453 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3454 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3458 unsigned int width, density, slow, attempts;
3459 adapter_t *adapter = mc7->adapter;
3460 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Snapshot geometry/speed configuration strapped into MC7_CFG. */
3465 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3466 slow = val & F_SLOW;
3467 width = G_WIDTH(val);
3468 density = G_DEN(val);
/* Enable the memory interface. */
3470 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3471 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Kick off single-shot pad calibration and check it completed cleanly. */
3475 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3476 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3478 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3479 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3480 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* Program DRAM timing parameters into MC7_PARM. */
3486 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3487 V_ACTTOPREDLY(p->ActToPreDly) |
3488 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3489 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3490 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
/* Enable clock and on-die termination. */
3492 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3493 val | F_CLKEN | F_TERM150);
3494 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Enable the DLL (conditionally on 'slow' per the strapping read above). */
3497 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* First half of the JEDEC init sequence: precharge-all + extended modes. */
3502 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3503 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3504 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3505 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
/* DLL reset via the mode register. */
3509 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3510 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
/* Second half: precharge, two refreshes, mode set, then OCD default/exit
 * via EXT_MODE1 (the 0x380 write followed by restoring val). */
3515 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3516 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3517 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3518 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3519 mc7_mode[mem_type]) ||
3520 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3521 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3524 /* clock value is in KHz */
3525 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3526 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
/* Enable periodic refresh with the divider derived above. */
3528 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3529 F_PERREFEN | V_PREREFDIV(mc7_clock));
3530 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* Turn on ECC generation and checking. */
3532 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3533 F_ECCGENEN | F_ECCCHKEN);
/* BIST: write zeros over the entire memory range to initialize ECC. */
3534 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3535 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3536 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3537 (mc7->size << width) - 1);
3538 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3539 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
/* Poll until the BIST engine goes idle or attempts are exhausted. */
3544 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3545 } while ((val & F_BUSY) && --attempts);
3547 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3551 /* Enable normal memory accesses. */
3552 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ACK latency and replay timer limits based on the negotiated
 * link width and Max_Payload_Size, then clear any latched PEX errors and
 * enable DMA-stop / completion-ID-decode in PCIE_CFG.
 */
3559 static void config_pcie(adapter_t *adap)
/* ACK latency table: rows indexed by log2(link width), cols by payload size. */
3561 static const u16 ack_lat[4][6] = {
3562 { 237, 416, 559, 1071, 2095, 4143 },
3563 { 128, 217, 289, 545, 1057, 2081 },
3564 { 73, 118, 154, 282, 538, 1050 },
3565 { 67, 107, 86, 150, 278, 534 }
/* Replay timer table, same indexing as ack_lat. */
3567 static const u16 rpl_tmr[4][6] = {
3568 { 711, 1248, 1677, 3213, 6285, 12429 },
3569 { 384, 651, 867, 1635, 3171, 6243 },
3570 { 219, 354, 462, 846, 1614, 3150 },
3571 { 201, 321, 258, 450, 834, 1602 }
3575 unsigned int log2_width, pldsize;
3576 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Read Max_Payload_Size from the PCIe Device Control register. */
3578 t3_os_pci_read_config_2(adap,
3579 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3581 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
/* Read Link Control (the L0s-enable bit is checked below). */
3583 t3_os_pci_read_config_2(adap,
3584 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* Fast-training sequence counts; rev 0 lacks the Rx-side register. */
3587 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3588 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3589 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3590 log2_width = fls(adap->params.pci.width) - 1;
3591 acklat = ack_lat[log2_width][pldsize];
3592 if (val & 1) /* check LOsEnable */
3593 acklat += fst_trn_tx * 4;
3594 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* T3 rev A uses different ACKLAT field placement than later revs. */
3596 if (adap->params.rev == 0)
3597 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3598 V_T3A_ACKLAT(M_T3A_ACKLAT),
3599 V_T3A_ACKLAT(acklat));
3601 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3604 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3605 V_REPLAYLMT(rpllmt));
/* Clear any stale PEX error bits, then enable DMA-stop and CID decode. */
3607 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3608 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3609 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
/**
3613 * t3_init_hw - initialize and configure T3 HW modules
3614 * @adapter: the adapter
3615 * @fw_params: initial parameters to pass to firmware (optional)
3617 * Initialize and configure T3 HW modules. This performs the
3618 * initialization steps that need to be done once after a card is reset.
3619 * MAC and PHY initialization is handled separarely whenever a port is
3622 * @fw_params are passed to FW and their value is platform dependent.
3623 * Only the top 8 bits are available for use, the rest must be 0.
 */
3625 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3627 int err = -EIO, attempts, i;
3628 const struct vpd_params *vpd = &adapter->params.vpd;
/* XGMAC impedance calibration: T3B+ uses the simplified routine. */
3630 if (adapter->params.rev > 0)
3631 calibrate_xgm_t3b(adapter);
3632 else if (calibrate_xgm(adapter))
/* Multi-port (>2) boards need an early MAC reset on port 0. */
3635 if (adapter->params.nports > 2)
3636 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
/* Carve up on-card memory per the TP parameters.
 * NOTE(review): this appears conditional in the original; guard truncated. */
3639 partition_mem(adapter, &adapter->params.tp);
/* Bring up the three MC7 controllers and the MC5 TCAM. */
3641 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3642 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3643 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3644 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3645 adapter->params.mc5.nfilters,
3646 adapter->params.mc5.nroutes))
/* Clear the first 32 SGE CQ contexts. */
3649 for (i = 0; i < 32; i++)
3650 if (clear_sge_ctxt(adapter, i, F_CQ))
3654 if (tp_init(adapter, &adapter->params.tp))
3657 #ifdef CONFIG_CHELSIO_T3_CORE
/* TP coalescing and max Rx size are capped by the SGE max packet size. */
3658 t3_tp_set_coalescing_size(adapter,
3659 min(adapter->params.sge.max_pkt_size,
3660 MAX_RX_COALESCING_LEN), 1);
3661 t3_tp_set_max_rxsize(adapter,
3662 min(adapter->params.sge.max_pkt_size, 16384U));
3663 ulp_config(adapter, &adapter->params.tp);
/* Bus-specific tuning: PCIe gets config_pcie, PCI-X gets DMA-stop bits. */
3665 if (is_pcie(adapter))
3666 config_pcie(adapter);
3668 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3669 F_DMASTOPEN | F_CLIDECEN);
3671 if (adapter->params.rev == T3_REV_C)
3672 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3673 F_CFG_CQE_SOP_MASK);
/* PM1 Rx gets all memory; clear Rx/Tx PM modes. */
3675 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3676 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3677 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3678 chan_init_hw(adapter, adapter->params.chan_map);
3679 t3_sge_init(adapter, &adapter->params.sge);
3681 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
/* Hand fw_params to the uP and point it at the flash boot image. */
3683 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3684 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3685 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3686 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3689 do { /* wait for uP to initialize */
/* uP clears HOST_ACC_DATA when its init completes. */
3691 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3693 CH_ERR(adapter, "uP initialization timed out\n");
/**
3703 * get_pci_mode - determine a card's PCI mode
3704 * @adapter: the adapter
3705 * @p: where to store the PCI settings
3707 * Determines a card's PCI mode and associated parameters, such as speed
 */
3710 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
/* PCI-X clock range -> bus speed in MHz. */
3712 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3713 u32 pci_mode, pcie_cap;
/* Presence of a PCIe capability decides PCIe vs PCI/PCI-X. */
3715 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3719 p->variant = PCI_VARIANT_PCIE;
3720 p->pcie_cap_addr = pcie_cap;
/* Negotiated link width comes from bits 9:4 of the Link Status register. */
3721 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3723 p->width = (val >> 4) & 0x3f;
/* Non-PCIe: decode speed/width/variant from the on-chip PCIX_MODE register. */
3727 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3728 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3729 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3730 pci_mode = G_PCIXINITPAT(pci_mode);
3732 p->variant = PCI_VARIANT_PCI;
3733 else if (pci_mode < 4)
3734 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3735 else if (pci_mode < 8)
3736 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3738 p->variant = PCI_VARIANT_PCIX_266_MODE2;
/**
3742 * init_link_config - initialize a link's SW state
3743 * @lc: structure holding the link state
3744 * @caps: link capabilities
3746 * Initializes the SW state maintained for each link, including the link's
3747 * capabilities and default speed/duplex/flow-control/autonegotiation
 */
3750 static void __devinit init_link_config(struct link_config *lc,
3753 lc->supported = caps;
/* Speed/duplex are unknown until the link comes up. */
3754 lc->requested_speed = lc->speed = SPEED_INVALID;
3755 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
/* Default to symmetric pause (Rx and Tx flow control). */
3756 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3757 if (lc->supported & SUPPORTED_Autoneg) {
/* Autoneg-capable links advertise everything they support. */
3758 lc->advertising = lc->supported;
3759 lc->autoneg = AUTONEG_ENABLE;
3760 lc->requested_fc |= PAUSE_AUTONEG;
/* Otherwise autoneg is forced off and nothing is advertised. */
3762 lc->advertising = 0;
3763 lc->autoneg = AUTONEG_DISABLE;
/**
3768 * mc7_calc_size - calculate MC7 memory size
3769 * @cfg: the MC7 configuration
3771 * Calculates the size of an MC7 memory in bytes from the value of its
3772 * configuration register.
 */
3774 static unsigned int __devinit mc7_calc_size(u32 cfg)
3776 unsigned int width = G_WIDTH(cfg);
/* Bank and organization bits each encode 1 or 2. */
3777 unsigned int banks = !!(cfg & F_BKS) + 1;
3778 unsigned int org = !!(cfg & F_ORG) + 1;
3779 unsigned int density = G_DEN(cfg);
/* Size in MB: base 256MB scaled up by density, banks; down by org, width.
 * NOTE(review): the return (converting MBs to bytes) is truncated here. */
3780 unsigned int MBs = ((256 << density) * banks) / (org << width);
/*
 * Initialize SW state for one MC7 memory controller: record its adapter,
 * register offset, and geometry (size, width) read from its CFG register.
 * @base_addr is the controller's register base; @name is used in messages.
 */
3785 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3786 unsigned int base_addr, const char *name)
3790 mc7->adapter = adapter;
/* All MC7 register offsets are relative to the PMRX instance's base. */
3792 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3793 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
/* Density at its max sentinel value means no memory is present. */
3794 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3795 mc7->width = G_WIDTH(cfg);
/*
 * Initialize SW state for one XGMAC instance and, on rev-0 XAUI boards,
 * apply SERDES/port-config workarounds.  @index selects the MAC and
 * determines its register offset.
 */
3798 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3800 mac->adapter = adapter;
/* More than two ports implies an external (multiport) MAC arrangement. */
3801 mac->multiport = adapter->params.nports > 2;
3802 if (mac->multiport) {
3803 mac->ext_port = (unsigned char)index;
/* Register offset of MAC #index relative to MAC 0. */
3809 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
/* Rev-0 XAUI workaround: SERDES control value differs for 10G vs 1G. */
3811 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3812 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3813 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3814 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/**
3820 * early_hw_init - HW initialization done at card detection time
3821 * @adapter: the adapter
3822 * @ai: contains information about the adapter type and properties
3824 * Perfoms the part of HW initialization that is done early on when the
3825 * driver first detecs the card. Most of the HW state is initialized
3826 * lazily later on when a port or an offload function are first used.
 */
3828 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
/* Port speed field: 10G (or >2-port boards) vs 1G. */
3830 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
/* Bring up the MDIO (MI1) interface first. */
3833 mi1_init(adapter, ai);
3834 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3835 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
/* Enable board-specific GPIO outputs; GPIO0 drives high. */
3836 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3837 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3838 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3839 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
/* Non-XAUI or rev-0 hardware skips the clock-divider reset sequence below. */
3841 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3844 /* Enable MAC clocks so we can access the registers */
3845 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3846 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
/* Pulse the clock-divider reset on both XGMAC instances. */
3848 val |= F_CLKDIVRESET_;
3849 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3850 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3851 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3852 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
/**
3856 * t3_reset_adapter - reset the adapter
3857 * @adapter: the adapter
3859 * Reset the adapter.
 */
3861 static int t3_reset_adapter(adapter_t *adapter)
/* Pre-B2 PCIe parts lose PCI config state across reset; save/restore it. */
3863 int i, save_and_restore_pcie =
3864 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3867 if (save_and_restore_pcie)
3868 t3_os_pci_save_state(adapter);
/* Trigger a warm reset of the whole chip. */
3869 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3872 * Delay. Give Some time to device to reset fully.
3873 * XXX The delay time should be modified.
/* Poll the PCI vendor ID; 0x1425 (Chelsio) reappearing means the device
 * is responding to config cycles again. */
3875 for (i = 0; i < 10; i++) {
3877 t3_os_pci_read_config_2(adapter, 0x00, &devid);
3878 if (devid == 0x1425)
3882 if (devid != 0x1425)
3885 if (save_and_restore_pcie)
3886 t3_os_pci_restore_state(adapter);
/*
 * Initialize memories that need known parity: clear SGE egress and
 * response-queue contexts and zero the CIM IBQ debug memory for all
 * four queues.  Returns 0 on success, or the first error encountered.
 */
3890 static int init_parity(adapter_t *adap)
/* The context interface must be idle before we can issue commands. */
3894 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Clear the first 16 and last 16 egress contexts. */
3897 for (err = i = 0; !err && i < 16; i++)
3898 err = clear_sge_ctxt(adap, i, F_EGRESS);
3899 for (i = 0xfff0; !err && i <= 0xffff; i++)
3900 err = clear_sge_ctxt(adap, i, F_EGRESS);
/* Clear one response-queue context per queue set. */
3901 for (i = 0; !err && i < SGE_QSETS; i++)
3902 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
/* Write zeros through the IBQ debug interface for queues 0..3. */
3906 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3907 for (i = 0; i < 4; i++)
3908 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3909 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3910 F_IBQDBGWR | V_IBQDBGQID(i) |
3911 V_IBQDBGADDR(addr));
/* Wait for each debug write to drain before issuing the next. */
3912 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3913 F_IBQDBGBUSY, 0, 2, 1);
/**
3921 * t3_prep_adapter - prepare SW and HW for operation
3922 * @adapter: the adapter
3923 * @ai: contains information about the adapter type and properties
3925 * Initialize adapter SW state for the various HW modules, set initial
3926 * values for some adapter tunables, take PHYs out of reset, and
3927 * initialize the MDIO interface.
 */
3929 int __devinit t3_prep_adapter(adapter_t *adapter,
3930 const struct adapter_info *ai, int reset)
3933 unsigned int i, j = 0;
3935 get_pci_mode(adapter, &adapter->params.pci);
/* Basic identity: port count, channel bitmap, silicon revision. */
3937 adapter->params.info = ai;
3938 adapter->params.nports = ai->nports0 + ai->nports1;
3939 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3940 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3941 adapter->params.linkpoll_period = 0;
/* Stats accumulation cadence depends on board type and link speed. */
3942 if (adapter->params.nports > 2)
3943 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3945 adapter->params.stats_update_period = is_10G(adapter) ?
3946 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3947 adapter->params.pci.vpd_cap_addr =
3948 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
/* VPD EEPROM carries clocks, memory timing and MAC addresses. */
3950 ret = get_vpd_params(adapter, &adapter->params.vpd);
3954 if (reset && t3_reset_adapter(adapter))
3957 t3_sge_prep(adapter, &adapter->params.sge);
/* A nonzero memory clock means on-card memory is present. */
3959 if (adapter->params.vpd.mclk) {
3960 struct tp_params *p = &adapter->params.tp;
3962 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3963 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3964 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
/* Derive TP memory partitioning from MC7 sizes and channel count. */
3966 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3967 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3968 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3969 p->cm_size = t3_mc7_size(&adapter->cm);
3970 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3971 p->chan_tx_size = p->pmtx_size / p->nchan;
3972 p->rx_pg_size = 64 * 1024;
3973 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3974 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3975 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* Use more timer queues on big-CM or post-rev-0 parts. */
3976 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3977 adapter->params.rev > 0 ? 12 : 6;
/* Timer and delayed-ACK resolution exponents from the core clock. */
3978 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3980 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
/* Offload requires all three memories to be populated. */
3983 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3984 t3_mc7_size(&adapter->pmtx) &&
3985 t3_mc7_size(&adapter->cm);
3987 if (is_offload(adapter)) {
/* MC5 TCAM region sizing; filters only work on rev > 0 silicon. */
3988 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3989 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3990 DEFAULT_NFILTERS : 0;
3991 adapter->params.mc5.nroutes = 0;
3992 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3994 #ifdef CONFIG_CHELSIO_T3_CORE
3995 init_mtus(adapter->params.mtus);
3996 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4000 early_hw_init(adapter, ai);
4001 ret = init_parity(adapter);
/* Boards with >2 ports use the external VSC7323 switch chip. */
4005 if (adapter->params.nports > 2 &&
4006 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4009 for_each_port(adapter, i) {
4011 const struct port_type_info *pti;
4012 struct port_info *p = adap2pinfo(adapter, i);
/* Skip unpopulated VPD port-type slots to find this port's entry. */
4014 while (!adapter->params.vpd.port_type[j])
4017 pti = &port_types[adapter->params.vpd.port_type[j]];
/* Attach the PHY driver for this port type at its MDIO address. */
4018 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
4022 mac_prep(&p->mac, adapter, j);
4026 * The VPD EEPROM stores the base Ethernet address for the
4027 * card. A port's address is derived from the base by adding
4028 * the port's index to the base's low octet.
4030 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4031 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4033 t3_os_set_hw_addr(adapter, i, hw_addr);
4034 init_link_config(&p->link_config, p->phy.caps);
/* PHYs start powered down; ports without IRQ support get link polling. */
4035 p->phy.ops->power_down(&p->phy, 1);
4036 if (!(p->phy.caps & SUPPORTED_IRQ))
4037 adapter->params.linkpoll_period = 10;
/**
4044 * t3_reinit_adapter - prepare HW for operation again
4045 * @adapter: the adapter
4047 * Put HW in the same state as @t3_prep_adapter without any changes to
4048 * SW state. This is a cut down version of @t3_prep_adapter intended
4049 * to be used after events that wipe out HW state but preserve SW state,
4050 * e.g., EEH. The device must be reset before calling this.
 */
4052 int t3_reinit_adapter(adapter_t *adap)
4057 early_hw_init(adap, adap->params.info);
4058 ret = init_parity(adap);
/* Re-init the external VSC7323 on >2-port boards. */
4062 if (adap->params.nports > 2 &&
4063 (ret = t3_vsc7323_init(adap, adap->params.nports)))
4066 for_each_port(adap, i) {
4067 const struct port_type_info *pti;
4068 struct port_info *p = adap2pinfo(adap, i);
/* Advance past unpopulated VPD port-type slots. */
4070 while (!adap->params.vpd.port_type[++j])
4073 pti = &port_types[adap->params.vpd.port_type[j]];
/* Re-prepare the PHY at its previously discovered address. */
4074 ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
4077 p->phy.ops->power_down(&p->phy, 1);
/*
 * Indicate driver readiness by changing the GPIO0 output (board LED)
 * state in the debug GPIO enable register.
 */
4082 void t3_led_ready(adapter_t *adapter)
4084 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/*
 * Fail traffic over to a single port: make only @port (0 or 1) active
 * in the MPS configuration.
 */
4088 void t3_port_failover(adapter_t *adapter, int port)
4092 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4093 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
/*
 * End a failover: re-mark both ports active in the MPS configuration.
 * (@port is currently unused by the visible body.)
 */
4097 void t3_failover_done(adapter_t *adapter, int port)
4099 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4100 F_PORT0ACTIVE | F_PORT1ACTIVE);
/*
 * Clear any failover state: mark both ports active in the MPS
 * configuration.
 */
4103 void t3_failover_clear(adapter_t *adapter)
4105 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4106 F_PORT0ACTIVE | F_PORT1ACTIVE);