1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include <cxgb_include.h>
37 #define msleep t3_os_sleep
/*
 * NOTE(review): fragmentary extract -- embedded original line numbers are
 * non-contiguous, so statements are missing between the lines shown.
 * Code lines are preserved verbatim.
 */
40 * t3_wait_op_done_val - wait until an operation is completed
41 * @adapter: the adapter performing the operation
42 * @reg: the register to check for completion
43 * @mask: a single-bit field within @reg that indicates completion
44 * @polarity: the value of the field when the operation is completed
45 * @attempts: number of check iterations
46 * @delay: delay in usecs between iterations
47 * @valp: where to store the value of the register at completion time
49 * Wait until an operation is completed by checking a bit in a register
50 * up to @attempts times. If @valp is not NULL the value of the register
51 * at the time it indicated completion is stored there. Returns 0 if the
52 * operation completes and -EAGAIN otherwise.
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 int attempts, int delay, u32 *valp)
58 u32 val = t3_read_reg(adapter, reg);
/* !! normalizes the masked field to 0/1 so it compares against @polarity */
60 if (!!(val & mask) == polarity) {
/* NOTE(review): truncated extract -- loop setup and closing lines are missing; code kept verbatim. */
73 * t3_write_regs - write a bunch of registers
74 * @adapter: the adapter to program
75 * @p: an array of register address/register value pairs
76 * @n: the number of address/value pairs
77 * @offset: register address offset
79 * Takes an array of register address/register value pairs and writes each
80 * value to the corresponding register. Register addresses are adjusted
81 * by the supplied offset.
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
/* Each entry's address is biased by @offset before the write. */
87 t3_write_reg(adapter, p->reg_addr + offset, p->val);
/* NOTE(review): truncated extract (braces missing from view); code kept verbatim. */
93 * t3_set_reg_field - set a register field to a value
94 * @adapter: the adapter to program
95 * @addr: the register address
96 * @mask: specifies the portion of the register to modify
97 * @val: the new value for the register field
99 * Sets a register field specified by the supplied mask to the
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
/* Read-modify-write: clear the masked field, then OR in the new value. */
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
/* Read-back forces the posted write out to the device before returning. */
107 (void) t3_read_reg(adapter, addr); /* flush */
/* NOTE(review): truncated extract -- the loop construct around the two statements is missing; code kept verbatim. */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
/* Select the indirect index, then fetch its value through the data register. */
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
/* NOTE(review): truncated extract -- error paths, outer loop, and buffer store are missing from view; code kept verbatim. */
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
/* shift/step tables are indexed by mc7->width (0..3) to reassemble 64-bit
 * words from narrower memory interfaces -- presumably bus-width dependent;
 * confirm against the MC7 hardware documentation. */
146 static int shift[] = { 0, 0, 16, 24 };
147 static int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 adapter_t *adap = mc7->adapter;
/* Bounds check: the requested range must lie entirely within the MC7. */
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
/* Writing 0 to BD_OP kicks off the backdoor read, then poll F_BUSY. */
166 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
167 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
168 while ((val & F_BUSY) && attempts--)
169 val = t3_read_reg(adap,
170 mc7->offset + A_MC7_BD_OP);
174 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
/* width == 0: full 64-bit data arrives in DATA0/DATA1 in one access. */
175 if (mc7->width == 0) {
176 val64 = t3_read_reg(adap,
177 mc7->offset + A_MC7_BD_DATA0);
178 val64 |= (u64)val << 32;
/* Narrow widths: extract the relevant lane and merge it at position i. */
181 val >>= shift[mc7->width];
182 val64 |= (u64)val << (step[mc7->width] * i);
/* NOTE(review): truncated extract; code kept verbatim. */
194 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
/* MDIO clock divider derived from core clock and desired MDC frequency. */
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
/* Max polls of F_BUSY for a single MI1 operation. */
202 #define MDIO_ATTEMPTS 20
/* NOTE(review): truncated extract -- MDIO_LOCK and error handling lines are missing from view; code kept verbatim. */
205 * MI1 read/write operations for clause 22 PHYs.
207 int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* V_ST(1) selects the clause-22 start-of-frame pattern. */
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
/* V_MDI_OP(2): read opcode -- presumably per the MI1 register spec; confirm. */
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 MDIO_UNLOCK(adapter);
/* NOTE(review): truncated extract -- MDIO_LOCK and return are missing from view; code kept verbatim. */
227 int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* Clause-22 frame: set ST, load address and data, then issue write opcode. */
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 MDIO_UNLOCK(adapter);
/* Ops table for clause-22 PHYs (initializer body not visible here). */
246 static struct mdio_ops mi1_mdio_ops = {
/* NOTE(review): truncated extract -- locking and error checks between steps are missing from view; code kept verbatim. */
252 * MI1 read/write operations for clause 45 PHYs.
254 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
255 int reg_addr, unsigned int *valp)
/* Clause 45: the MMD (device) address goes in the REGADDR field. */
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
/* V_ST(...)=0 selects the clause-45 start-of-frame pattern. */
261 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0)
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
/* Two-phase access: first an address cycle (op 0) carrying reg_addr... */
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
/* ...then the read cycle (op 3) returning the register contents. */
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
273 MDIO_UNLOCK(adapter);
/* NOTE(review): truncated extract -- locking and error checks are missing from view; code kept verbatim. */
277 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
/* Clause-45 two-phase write: address cycle (op 0) with reg_addr... */
284 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
285 t3_write_reg(adapter, A_MI1_ADDR, addr);
286 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
287 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
288 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
/* ...then the data cycle (op 1) carrying the value to write. */
290 t3_write_reg(adapter, A_MI1_DATA, val);
291 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
292 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
295 MDIO_UNLOCK(adapter);
/* Ops table for clause-45 PHYs (initializer body not visible here). */
299 static struct mdio_ops mi1_mdio_ext_ops = {
/* NOTE(review): truncated extract -- variable declarations and error check between read and write are missing; code kept verbatim. */
305 * t3_mdio_change_bits - modify the value of a PHY register
306 * @phy: the PHY to operate on
307 * @mmd: the device address
308 * @reg: the register address
309 * @clear: what part of the register value to mask off
310 * @set: what part of the register value to set
312 * Changes the value of a PHY register by applying a mask to its current
313 * value and ORing the result with a new value.
315 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
/* Read-modify-write over MDIO (the &~clear step is not visible in this extract). */
321 ret = mdio_read(phy, mmd, reg, &val);
324 ret = mdio_write(phy, mmd, reg, val | set);
/* NOTE(review): truncated extract -- the wait loop body and return value handling are missing from view; code kept verbatim. */
330 * t3_phy_reset - reset a PHY block
331 * @phy: the PHY to operate on
332 * @mmd: the device address of the PHY block to reset
333 * @wait: how long to wait for the reset to complete in 1ms increments
335 * Resets a PHY block and optionally waits for the reset to complete.
336 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
339 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
/* Set BMCR_RESET while clearing power-down; the PHY self-clears the bit. */
344 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
/* Poll BMCR until the reset bit clears or @wait iterations elapse. */
349 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
355 } while (ctl && --wait);
/* NOTE(review): truncated extract -- error-return checks between MDIO calls are missing from view; code kept verbatim. */
361 * t3_phy_advertise - set the PHY advertisement registers for autoneg
362 * @phy: the PHY to operate on
363 * @advert: bitmap of capabilities the PHY should advertise
365 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
366 * requested capabilities.
368 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
371 unsigned int val = 0;
/* GigE capabilities live in MII_CTRL1000; read-modify-write just those bits. */
373 err = mdio_read(phy, 0, MII_CTRL1000, &val);
377 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
378 if (advert & ADVERTISED_1000baseT_Half)
379 val |= ADVERTISE_1000HALF;
380 if (advert & ADVERTISED_1000baseT_Full)
381 val |= ADVERTISE_1000FULL;
383 err = mdio_write(phy, 0, MII_CTRL1000, val);
/* 10/100 and pause capabilities are built into MII_ADVERTISE from scratch. */
388 if (advert & ADVERTISED_10baseT_Half)
389 val |= ADVERTISE_10HALF;
390 if (advert & ADVERTISED_10baseT_Full)
391 val |= ADVERTISE_10FULL;
392 if (advert & ADVERTISED_100baseT_Half)
393 val |= ADVERTISE_100HALF;
394 if (advert & ADVERTISED_100baseT_Full)
395 val |= ADVERTISE_100FULL;
396 if (advert & ADVERTISED_Pause)
397 val |= ADVERTISE_PAUSE_CAP;
398 if (advert & ADVERTISED_Asym_Pause)
399 val |= ADVERTISE_PAUSE_ASYM;
400 return mdio_write(phy, 0, MII_ADVERTISE, val);
/* NOTE(review): truncated extract; code kept verbatim. */
404 * t3_phy_advertise_fiber - set fiber PHY advertisement register
405 * @phy: the PHY to operate on
406 * @advert: bitmap of capabilities the PHY should advertise
408 * Sets a fiber PHY's advertisement register to advertise the
409 * requested capabilities.
411 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
413 unsigned int val = 0;
/* 1000BASE-X uses the X-variant advertisement bits in MII_ADVERTISE. */
415 if (advert & ADVERTISED_1000baseT_Half)
416 val |= ADVERTISE_1000XHALF;
417 if (advert & ADVERTISED_1000baseT_Full)
418 val |= ADVERTISE_1000XFULL;
419 if (advert & ADVERTISED_Pause)
420 val |= ADVERTISE_1000XPAUSE;
421 if (advert & ADVERTISED_Asym_Pause)
422 val |= ADVERTISE_1000XPSE_ASYM;
423 return mdio_write(phy, 0, MII_ADVERTISE, val);
/* NOTE(review): truncated extract -- the speed/duplex >= 0 guards around the modify steps are missing from view; code kept verbatim. */
427 * t3_set_phy_speed_duplex - force PHY speed and duplex
428 * @phy: the PHY to operate on
429 * @speed: requested PHY speed
430 * @duplex: requested PHY duplex
432 * Force a 10/100/1000 PHY's speed and duplex. This also disables
433 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
435 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
440 err = mdio_read(phy, 0, MII_BMCR, &ctl);
/* Clear speed selection and autoneg, then re-encode the requested speed. */
445 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
446 if (speed == SPEED_100)
447 ctl |= BMCR_SPEED100;
448 else if (speed == SPEED_1000)
449 ctl |= BMCR_SPEED1000;
452 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
453 if (duplex == DUPLEX_FULL)
454 ctl |= BMCR_FULLDPLX;
/* IEEE 802.3 requires autoneg for 1000BASE-T, so re-enable it for GigE. */
456 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
457 ctl |= BMCR_ANENABLE;
458 return mdio_write(phy, 0, MII_BMCR, ctl);
/* NOTE(review): truncated extract; LASI (Link Alarm Status Interrupt) helpers for 10G PHYs; code kept verbatim. */
461 int t3_phy_lasi_intr_enable(struct cphy *phy)
463 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
466 int t3_phy_lasi_intr_disable(struct cphy *phy)
468 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
/* LASI status is read-to-clear, so a plain read clears the interrupt. */
471 int t3_phy_lasi_intr_clear(struct cphy *phy)
475 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
478 int t3_phy_lasi_intr_handler(struct cphy *phy)
481 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
/* Bit 0 of LASI_STAT signals a link state change event. */
485 return (status & 1) ? cphy_cause_link_change : 0;
/* NOTE(review): truncated extract -- leading fields of each initializer entry are missing from view; code kept verbatim.
 * Per-board table: GPIO output-enable/value masks, interrupt GPIO pins,
 * supported link modes, MDIO ops (clause 22 vs 45), and product name. */
488 static struct adapter_info t3_adap_info[] = {
490 F_GPIO2_OEN | F_GPIO4_OEN |
491 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
492 &mi1_mdio_ops, "Chelsio PE9000" },
494 F_GPIO2_OEN | F_GPIO4_OEN |
495 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
496 &mi1_mdio_ops, "Chelsio T302" },
498 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
499 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
500 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
501 &mi1_mdio_ext_ops, "Chelsio T310" },
503 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
504 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
505 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
506 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
507 &mi1_mdio_ext_ops, "Chelsio T320" },
509 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
510 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
511 { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
512 &mi1_mdio_ops, "Chelsio T304" },
515 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
516 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
517 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
518 &mi1_mdio_ext_ops, "Chelsio N310" }
/* NOTE(review): truncated extract; code kept verbatim. */
522 * Return the adapter_info structure with a given index. Out-of-range indices
525 const struct adapter_info *t3_get_adapter_info(unsigned int id)
/* Out-of-range ids yield NULL rather than indexing past the table. */
527 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Dispatch record: per-port-type PHY preparation hook. */
530 struct port_type_info {
531 int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
532 const struct mdio_ops *ops);
/* NOTE(review): truncated extract -- at least one leading entry and the closing brace are missing from view; code kept verbatim.
 * Table indexed by the VPD port-type code; each entry names the prep
 * routine for one PHY model. */
535 static struct port_type_info port_types[] = {
537 { t3_ael1002_phy_prep },
538 { t3_vsc8211_phy_prep },
539 { t3_mv88e1xxx_phy_prep },
540 { t3_xaui_direct_phy_prep },
541 { t3_ael2005_phy_prep },
542 { t3_qt2045_phy_prep },
543 { t3_ael1006_phy_prep },
544 { t3_tn1010_phy_prep },
/* NOTE(review): truncated extract -- the struct's opening lines (tag/length/id fields) are missing from view; code kept verbatim. */
547 #define VPD_ENTRY(name, len) \
548 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
/* Each VPD_ENTRY expands to a PCI-VPD keyword record: 2-byte keyword,
 * 1-byte length, then the data bytes. */
551 * Partial EEPROM Vital Product Data structure. Includes only the ID and
560 VPD_ENTRY(pn, 16); /* part number */
561 VPD_ENTRY(ec, ECNUM_LEN); /* EC level */
562 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
563 VPD_ENTRY(na, 12); /* MAC address base */
564 VPD_ENTRY(cclk, 6); /* core clock */
565 VPD_ENTRY(mclk, 6); /* mem clock */
566 VPD_ENTRY(uclk, 6); /* uP clk */
567 VPD_ENTRY(mdc, 6); /* MDIO clk */
568 VPD_ENTRY(mt, 2); /* mem timing */
569 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
570 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
571 VPD_ENTRY(port0, 2); /* PHY0 complex */
572 VPD_ENTRY(port1, 2); /* PHY1 complex */
573 VPD_ENTRY(port2, 2); /* PHY2 complex */
574 VPD_ENTRY(port3, 2); /* PHY3 complex */
575 VPD_ENTRY(rv, 1); /* csum */
576 u32 pad; /* for multiple-of-4 sizing and alignment */
/* EEPROM access tuning and layout constants. */
579 #define EEPROM_MAX_POLL 40
580 #define EEPROM_STAT_ADDR 0x4000
581 #define VPD_BASE 0xc00
/* NOTE(review): truncated extract -- error returns and the udelay in the poll loop are missing from view; code kept verbatim. */
584 * t3_seeprom_read - read a VPD EEPROM location
585 * @adapter: adapter to read
586 * @addr: EEPROM address
587 * @data: where to store the read data
589 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
590 * VPD ROM capability. A zero is written to the flag bit when the
591 * addres is written to the control register. The hardware device will
592 * set the flag to 1 when 4 bytes have been read into the data register.
594 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
597 int attempts = EEPROM_MAX_POLL;
598 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Reject out-of-range or unaligned addresses (EEPROM_STAT_ADDR is allowed). */
600 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
/* Writing the address with flag=0 starts the read; hardware sets
 * PCI_VPD_ADDR_F when the 4 data bytes are ready. */
603 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
606 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
607 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
609 if (!(val & PCI_VPD_ADDR_F)) {
610 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
613 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
/* VPD data arrives little-endian; convert to host order. */
614 *data = le32_to_cpu(*data);
/* NOTE(review): truncated extract -- error returns and poll delay are missing from view; code kept verbatim. */
619 * t3_seeprom_write - write a VPD EEPROM location
620 * @adapter: adapter to write
621 * @addr: EEPROM address
622 * @data: value to write
624 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
625 * VPD ROM capability.
627 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
630 int attempts = EEPROM_MAX_POLL;
631 unsigned int base = adapter->params.pci.vpd_cap_addr;
633 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
/* Write data first, then the address with flag=1; hardware clears
 * PCI_VPD_ADDR_F when the write completes (opposite polarity to read). */
636 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
638 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
639 (u16)addr | PCI_VPD_ADDR_F);
642 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
643 } while ((val & PCI_VPD_ADDR_F) && --attempts);
645 if (val & PCI_VPD_ADDR_F) {
646 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
/* NOTE(review): truncated extract; code kept verbatim. */
653 * t3_seeprom_wp - enable/disable EEPROM write protection
654 * @adapter: the adapter
655 * @enable: 1 to enable write protection, 0 to disable it
657 * Enables or disables write protection on the serial EEPROM.
659 int t3_seeprom_wp(adapter_t *adapter, int enable)
/* 0xc sets the EEPROM's block-protect bits via the status word; 0 clears them. */
661 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
665 * Convert a character holding a hex digit to a number.
667 static unsigned int hex2int(unsigned char c)
/* Assumes @c is a valid hex digit; no validation is performed. */
669 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
/* NOTE(review): truncated extract -- declarations, error returns, and the else branch structure are partly missing from view; code kept verbatim. */
673 * get_vpd_params - read VPD parameters from VPD EEPROM
674 * @adapter: adapter to read
675 * @p: where to store the parameters
677 * Reads card parameters stored in VPD EEPROM.
679 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
685 * Card information is normally at VPD_BASE but some early cards had
688 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
/* 0x82 is the VPD large-resource ID-string tag; if absent at VPD_BASE,
 * fall back to offset 0 (early cards). */
691 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
/* Read the whole partial-VPD struct 32 bits at a time. */
693 for (i = 0; i < sizeof(vpd); i += 4) {
694 ret = t3_seeprom_read(adapter, addr + i,
695 (u32 *)((u8 *)&vpd + i));
/* Clock fields are stored as ASCII decimal strings. */
700 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
701 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
702 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
703 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
704 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
705 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
706 memcpy(p->ec, vpd.ec_data, ECNUM_LEN);
708 /* Old eeproms didn't have port information */
709 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
/* Hardwired defaults for rev-0 boards without port data in VPD. */
710 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
711 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
713 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
714 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
715 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
716 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
717 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
718 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
/* MAC base address is stored as 12 ASCII hex digits; decode pairwise. */
721 for (i = 0; i < 6; i++)
722 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
723 hex2int(vpd.na_data[2 * i + 1]);
/* NOTE(review): truncated extract -- typedef closing line and enum opening line are missing from view; code kept verbatim. */
727 /* BIOS boot header */
728 typedef struct boot_header_s {
729 u8 signature[2]; /* signature */
730 u8 length; /* image length (include header) */
731 u8 offset[4]; /* initialization vector */
732 u8 reserved[19]; /* reserved */
733 u8 exheader[2]; /* offset to expansion header */
736 /* serial flash and firmware constants */
738 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
739 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
740 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
742 /* flash command opcodes */
/* Standard SPI-flash opcodes (page program, status, write-enable, etc.). */
743 SF_PROG_PAGE = 2, /* program page */
744 SF_WR_DISABLE = 4, /* disable writes */
745 SF_RD_STATUS = 5, /* read status register */
746 SF_WR_ENABLE = 6, /* enable writes */
747 SF_RD_DATA_FAST = 0xb, /* read flash */
748 SF_ERASE_SECTOR = 0xd8, /* erase sector */
750 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
751 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
752 FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
753 FW_MIN_SIZE = 8, /* at least version and csum */
754 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
755 FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,
757 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
758 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
759 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
760 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
761 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment */
/* NOTE(review): truncated extract -- error return values are missing from view; code kept verbatim. */
765 * sf1_read - read data from the serial flash
766 * @adapter: the adapter
767 * @byte_cnt: number of bytes to read
768 * @cont: whether another operation will be chained
769 * @valp: where to store the read data
771 * Reads up to 4 bytes of data from the serial flash. The location of
772 * the read needs to be specified prior to calling this by issuing the
773 * appropriate commands to the serial flash.
775 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
/* Only 1..4 bytes can be transferred per SF_OP operation. */
780 if (!byte_cnt || byte_cnt > 4)
/* Reject a new operation while a previous one is still busy. */
782 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
784 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
785 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
787 *valp = t3_read_reg(adapter, A_SF_DATA);
/* NOTE(review): truncated extract -- error return values are missing from view; code kept verbatim. */
792 * sf1_write - write data to the serial flash
793 * @adapter: the adapter
794 * @byte_cnt: number of bytes to write
795 * @cont: whether another operation will be chained
796 * @val: value to write
798 * Writes up to 4 bytes of data to the serial flash. The location of
799 * the write needs to be specified prior to calling this by issuing the
800 * appropriate commands to the serial flash.
802 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
805 if (!byte_cnt || byte_cnt > 4)
807 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Load the data, then trigger the write (V_OP(1)) and wait for !BUSY. */
809 t3_write_reg(adapter, A_SF_DATA, val);
810 t3_write_reg(adapter, A_SF_OP,
811 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
812 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
/* NOTE(review): truncated extract -- loop construct, status test, and delay are missing from view; code kept verbatim. */
816 * flash_wait_op - wait for a flash operation to complete
817 * @adapter: the adapter
818 * @attempts: max number of polls of the status register
819 * @delay: delay between polls in ms
821 * Wait for a flash operation to complete by polling the status register.
823 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
/* Issue SF_RD_STATUS (chained), then read back one status byte. */
829 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
830 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
/* NOTE(review): truncated extract -- error returns and the byte_oriented guard around htonl are missing from view; code kept verbatim. */
842 * t3_read_flash - read words from serial flash
843 * @adapter: the adapter
844 * @addr: the start address for the read
845 * @nwords: how many 32-bit words to read
846 * @data: where to store the read data
847 * @byte_oriented: whether to store data as bytes or as words
849 * Read the specified number of 32-bit words from the serial flash.
850 * If @byte_oriented is set the read data is stored as a byte array
851 * (i.e., big-endian), otherwise as 32-bit words in the platform's
854 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
855 u32 *data, int byte_oriented)
/* Range and alignment check against the flash size. */
859 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
/* Byte-swap the address so it goes out MSB-first after the opcode. */
862 addr = swab32(addr) | SF_RD_DATA_FAST;
/* Send opcode+address, then one dummy byte required by fast-read. */
864 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
865 (ret = sf1_read(adapter, 1, 1, data)) != 0)
/* cont=1 on all but the last word keeps the chip selected. */
868 for ( ; nwords; nwords--, data++) {
869 ret = sf1_read(adapter, 4, nwords > 1, data);
873 *data = htonl(*data);
/* NOTE(review): truncated extract -- chunk-size computation, byte-order handling, data advance, and error returns are missing from view; code kept verbatim. */
879 * t3_write_flash - write up to a page of data to the serial flash
880 * @adapter: the adapter
881 * @addr: the start address to write
882 * @n: length of data to write
883 * @data: the data to write
884 * @byte_oriented: whether to store data as bytes or as words
886 * Writes up to a page of data (256 bytes) to the serial flash starting
887 * at the given address.
888 * If @byte_oriented is set the write data is stored as a 32-bit
889 * big-endian array, otherwise in the processor's native endianess.
892 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
893 unsigned int n, const u8 *data,
898 unsigned int c, left, val, offset = addr & 0xff;
/* A page program must not cross a 256-byte page boundary. */
900 if (addr + n > SF_SIZE || offset + n > 256)
903 val = swab32(addr) | SF_PROG_PAGE;
/* WREN must precede every program operation. */
905 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
906 (ret = sf1_write(adapter, 4, 1, val)) != 0)
909 for (left = n; left; left -= c) {
911 val = *(const u32*)data;
916 ret = sf1_write(adapter, c, c != left, val);
920 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
923 /* Read the page to verify the write succeeded */
924 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data has been advanced past the written bytes, hence data - n here. */
929 if (memcmp(data - n, (u8 *)buf + offset, n))
/* NOTE(review): truncated extract -- wait arguments and error return are missing from view; code kept verbatim. */
935 * t3_get_tp_version - read the tp sram version
936 * @adapter: the adapter
937 * @vers: where to place the version
939 * Reads the protocol sram version from sram.
941 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
945 /* Get version loaded in SRAM */
/* Writing 0 to FIELD0 requests the embedded version; result lands in FIELD1. */
946 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
947 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
952 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
/* NOTE(review): truncated extract -- return statements are missing from view; code kept verbatim. */
958 * t3_check_tpsram_version - read the tp sram version
959 * @adapter: the adapter
962 int t3_check_tpsram_version(adapter_t *adapter)
966 unsigned int major, minor;
/* Rev-A silicon is handled specially (branch body not visible here). */
968 if (adapter->params.rev == T3_REV_A)
972 ret = t3_get_tp_version(adapter, &vers);
976 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
978 major = G_TP_VERSION_MAJOR(vers);
979 minor = G_TP_VERSION_MINOR(vers);
/* Exact major.minor match against the compiled-in TP version is required. */
981 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
984 CH_ERR(adapter, "found wrong TP version (%u.%u), "
985 "driver compiled for version %d.%d\n", major, minor,
986 TP_VERSION_MAJOR, TP_VERSION_MINOR);
/* NOTE(review): truncated extract -- return statements and enum members are missing from view; code kept verbatim. */
992 * t3_check_tpsram - check if provided protocol SRAM
993 * is compatible with this driver
994 * @adapter: the adapter
995 * @tp_sram: the firmware image to write
998 * Checks if an adapter's tp sram is compatible with the driver.
999 * Returns 0 if the versions are compatible, a negative error otherwise.
1001 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1005 const u32 *p = (const u32 *)tp_sram;
1007 /* Verify checksum */
/* Image words are big-endian; a valid image's 32-bit sum is all-ones. */
1008 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1009 csum += ntohl(p[i]);
1010 if (csum != 0xffffffff) {
1011 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1019 enum fw_version_type {
/* NOTE(review): truncated extract; code kept verbatim. */
1025 * t3_get_fw_version - read the firmware version
1026 * @adapter: the adapter
1027 * @vers: where to place the version
1029 * Reads the FW version from flash. Note that we had to move the version
1030 * due to FW size. If we don't find a valid FW version in the new location
1031 * we fall back and read the old location.
1033 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1035 int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
/* 0xffffffff means erased flash at the new location -- fall back. */
1036 if (!ret && *vers != 0xffffffff)
1039 return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
/* NOTE(review): truncated extract -- return statements and else keyword lines are missing from view; code kept verbatim. */
1043 * t3_check_fw_version - check if the FW is compatible with this driver
1044 * @adapter: the adapter
1046 * Checks if an adapter's FW is compatible with the driver. Returns 0
1047 * if the versions are compatible, a negative error otherwise.
1049 int t3_check_fw_version(adapter_t *adapter)
1053 unsigned int type, major, minor;
1055 ret = t3_get_fw_version(adapter, &vers);
1059 type = G_FW_VERSION_TYPE(vers);
1060 major = G_FW_VERSION_MAJOR(vers);
1061 minor = G_FW_VERSION_MINOR(vers);
/* Exact type/major/minor match is success; otherwise warn whether the
 * flashed FW is older or newer than what the driver was built for. */
1063 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1064 minor == FW_VERSION_MINOR)
1067 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1068 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1069 "driver compiled for version %u.%u\n", major, minor,
1070 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1072 CH_WARN(adapter, "found newer FW version(%u.%u), "
1073 "driver compiled for version %u.%u\n", major, minor,
1074 FW_VERSION_MAJOR, FW_VERSION_MINOR);
/* NOTE(review): truncated extract -- loop increment and return are missing from view; code kept verbatim. */
1081 * t3_flash_erase_sectors - erase a range of flash sectors
1082 * @adapter: the adapter
1083 * @start: the first sector to erase
1084 * @end: the last sector to erase
1086 * Erases the sectors in the given range.
1088 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1090 while (start <= end) {
/* Per sector: WREN, then sector-erase with the sector number in the
 * address field (start << 8), then poll the status register. */
1093 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1094 (ret = sf1_write(adapter, 4, 0,
1095 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1096 (ret = flash_wait_op(adapter, 5, 500)) != 0)
/* NOTE(review): truncated extract -- error returns, addr advance, and the final version write setup are partly missing from view; code kept verbatim. */
1104 * t3_load_fw - download firmware
1105 * @adapter: the adapter
1106 * @fw_data: the firmware image to write
1109 * Write the supplied firmware image to the card's serial flash.
1110 * The FW image has the following sections: @size - 8 bytes of code and
1111 * data, followed by 4 bytes of FW version, followed by the 32-bit
1112 * 1's complement checksum of the whole image.
1114 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1116 u32 version, csum, fw_version_addr;
1118 const u32 *p = (const u32 *)fw_data;
1119 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
/* Image must be word-aligned and large enough for version + checksum. */
1121 if ((size & 3) || size < FW_MIN_SIZE)
1123 if (size - 8 > FW_MAX_SIZE)
/* Version word sits 8 bytes from the end (before the checksum). */
1126 version = ntohl(*(const u32 *)(fw_data + size - 8));
/* Pre-8 firmware keeps its version at the older flash offset. */
1127 if (G_FW_VERSION_MAJOR(version) < 8) {
1129 fw_version_addr = FW_VERS_ADDR_PRE8;
1131 if (size - 8 > FW_MAX_SIZE_PRE8)
1134 fw_version_addr = FW_VERS_ADDR;
/* Whole-image 1's-complement checksum must sum to all-ones. */
1136 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1137 csum += ntohl(p[i]);
1138 if (csum != 0xffffffff) {
1139 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1144 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1148 size -= 8; /* trim off version and checksum */
/* Program the body in flash-page-sized (256B) chunks. */
1149 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1150 unsigned int chunk_size = min(size, 256U);
1152 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1157 fw_data += chunk_size;
/* Write the 4-byte version word last so a partial flash is detectable. */
1161 ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
1164 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
/* NOTE(review): truncated extract -- error returns and addr advance are missing from view; code kept verbatim. */
1169 * t3_load_boot - download boot flash
1170 * @adapter: the adapter
1171 * @boot_data: the boot image to write
1174 * Write the supplied boot image to the card's serial flash.
1175 * The boot image has the following sections: a 28-byte header and the
1178 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1180 boot_header_t *header = (boot_header_t *)boot_data;
1183 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1184 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1187 * Perform some primitive sanity testing to avoid accidentally
1188 * writing garbage over the boot sectors. We ought to check for
1189 * more but it's not worth it for now ...
1191 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1192 CH_ERR(adapter, "boot image too small/large\n");
/* 0xaa55 is the PC expansion-ROM signature, stored little-endian. */
1195 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1196 CH_ERR(adapter, "boot image missing signature\n");
/* Header length is in 512-byte units and must match the actual size. */
1199 if (header->length * BOOT_SIZE_INC != size) {
1200 CH_ERR(adapter, "boot image header length != image length\n");
1204 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1208 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1209 unsigned int chunk_size = min(size, 256U);
/* byte_oriented=0 here, unlike the FW download path. */
1211 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1216 boot_data += chunk_size;
1222 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
/* NOTE(review): truncated extract -- wait arguments and return are missing from view; code kept verbatim. */
1226 #define CIM_CTL_BASE 0x2000
1229 * t3_cim_ctl_blk_read - read a block from CIM control region
1230 * @adap: the adapter
1231 * @addr: the start address within the CIM control region
1232 * @n: number of words to read
1233 * @valp: where to store the result
1235 * Reads a block of 4-byte words from the CIM control region.
1237 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
/* Bail out if a previous host access is still in flight. */
1242 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1245 for ( ; !ret && n--; addr += 4) {
/* Writing the biased address triggers the access; poll HOSTBUSY, then
 * collect the word from the data register. */
1246 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1247 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1250 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
/*
 * t3_gate_rx_traffic: quiesce all MAC Rx traffic (unicast via exact-match
 * filters, then bcast/mcast/promiscuous via XGM_RX_CFG, then the hash
 * filters), saving the previous register values into the out-parameters so
 * t3_open_rx_traffic() can restore them later.
 */
1255 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1256 u32 *rx_hash_high, u32 *rx_hash_low)
1258 /* stop Rx unicast traffic */
1259 t3_mac_disable_exact_filters(mac);
1261 /* stop broadcast, multicast, promiscuous mode traffic */
1262 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1263 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1264 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1267 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1268 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1270 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1271 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1273 /* Leave time to drain max RX fifo */
/*
 * t3_open_rx_traffic: inverse of t3_gate_rx_traffic() — re-enable the
 * exact-match filters and restore the saved XGM_RX_CFG field and hash
 * filter registers.
 */
1277 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1278 u32 rx_hash_high, u32 rx_hash_low)
1280 t3_mac_enable_exact_filters(mac);
1281 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1282 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1284 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1285 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
/*
 * t3_detect_link_fault: probe a port for a (remote) link fault. Gates Rx
 * traffic and disables Rx, clears pending XGM interrupt status, re-enables
 * the XGM interrupt and Rx, restores traffic, then samples
 * A_XGM_INT_STATUS. Returns 1 if F_LINKFAULTCHANGE is (still) asserted,
 * 0 otherwise. NOTE(review): elided lines likely include a settling delay
 * between the steps — not visible here.
 */
1288 static int t3_detect_link_fault(adapter_t *adapter, int port_id)
1290 struct port_info *pi = adap2pinfo(adapter, port_id);
1291 struct cmac *mac = &pi->mac;
1292 uint32_t rx_cfg, rx_hash_high, rx_hash_low;
1296 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1297 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1299 /* clear status and make sure intr is enabled */
1300 (void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1301 t3_xgm_intr_enable(adapter, port_id);
1304 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1305 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1307 link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1308 return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
/*
 * t3_clear_faults: recover a port from a (possibly false) link-fault
 * condition — re-enable the MAC in both directions, clear the MAC
 * statistics, and (on <=2-port adapters only) ack and re-arm the XGM
 * interrupt. The nports check mirrors the link-fault handling in
 * t3_link_changed(), which is likewise gated to <=2-port adapters.
 */
1311 static void t3_clear_faults(adapter_t *adapter, int port_id)
1313 struct port_info *pi = adap2pinfo(adapter, port_id);
1314 struct cmac *mac = &pi->mac;
1316 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
1318 t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1319 t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset, F_CLRSTATS, 1);
1321 if (adapter->params.nports <= 2) {
1322 t3_xgm_intr_disable(adapter, pi->port_id);
1323 t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1324 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
1325 t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
1326 F_XGM_INT, F_XGM_INT);
1327 t3_xgm_intr_enable(adapter, pi->port_id);
/*
 * t3_link_changed: central link-state change handler. Reads the current
 * PHY status, runs link-fault detection when warranted (<=2-port adapters,
 * suspected fault with PHY-up or a down->up transition), reconciles flow
 * control with the requested settings, updates the cached link_config, and
 * programs MAC/XAUI registers for the up or down transition before calling
 * the OS notification hook. pi->link_fault uses the LF_NO/LF_MAYBE/LF_YES
 * tri-state set here and in mac_intr_handler().
 * NOTE(review): many lines are elided (returns, else-branches, braces);
 * code below kept byte-identical — do not infer exact control flow from
 * adjacency of the visible lines.
 */
1332 * t3_link_changed - handle interface link changes
1333 * @adapter: the adapter
1334 * @port_id: the port index that changed link state
1336 * Called when a port's link settings change to propagate the new values
1337 * to the associated PHY and MAC. After performing the common tasks it
1338 * invokes an OS-specific handler.
1340 void t3_link_changed(adapter_t *adapter, int port_id)
1342 int link_ok, speed, duplex, fc, link_fault, link_change;
1343 struct port_info *pi = adap2pinfo(adapter, port_id);
1344 struct cphy *phy = &pi->phy;
1345 struct cmac *mac = &pi->mac;
1346 struct link_config *lc = &pi->link_config;
1348 link_ok = lc->link_ok;
1350 duplex = lc->duplex;
1354 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1357 * Check for link faults if any of these is true:
1358 * a) A link fault is suspected, and PHY says link ok
1359 * b) PHY link transitioned from down -> up
1361 if (adapter->params.nports <= 2 &&
1362 ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
1364 link_fault = t3_detect_link_fault(adapter, port_id);
1366 if (pi->link_fault != LF_YES) {
1367 mac->stats.link_faults++;
1368 pi->link_fault = LF_YES;
1371 /* Don't report link up or any other change */
1374 duplex = lc->duplex;
1377 /* clear faults here if this was a false alarm. */
1378 if (pi->link_fault == LF_MAYBE &&
1379 link_ok && lc->link_ok)
1380 t3_clear_faults(adapter, port_id);
1382 pi->link_fault = LF_NO;
1386 if (lc->requested_fc & PAUSE_AUTONEG)
1387 fc &= lc->requested_fc;
1389 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1391 if (link_ok == lc->link_ok && speed == lc->speed &&
1392 duplex == lc->duplex && fc == lc->fc)
1393 return; /* nothing changed */
1395 link_change = link_ok != lc->link_ok;
1396 lc->link_ok = (unsigned char)link_ok;
1397 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1398 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1402 /* down -> up, or up -> up with changed settings */
1404 if (link_change && adapter->params.rev > 0 &&
1405 uses_xaui(adapter)) {
1407 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1408 F_TXACTENABLE | F_RXEN);
1411 if (speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1412 /* Set MAC settings to match PHY. */
1413 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1414 lc->fc = (unsigned char)fc;
1417 t3_clear_faults(adapter, port_id);
1423 if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1424 t3_write_reg(adapter,
1425 A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1428 t3_xgm_intr_disable(adapter, pi->port_id);
1429 if (adapter->params.nports <= 2) {
1430 t3_set_reg_field(adapter,
1431 A_XGM_INT_ENABLE + mac->offset,
1436 if (is_10G(adapter))
1437 pi->phy.ops->power_down(&pi->phy, 1);
1438 t3_mac_disable(mac, MAC_DIRECTION_RX);
1439 t3_link_start(phy, mac, lc);
1443 * Make sure Tx FIFO continues to drain, even as rxen is left
1444 * high to help detect and indicate remote faults.
1446 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0,
1448 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1449 t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN);
1450 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1453 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
/*
 * t3_link_start: apply the desired link configuration. For autoneg-capable
 * PHYs, the pause advertisement bits are rebuilt from requested_fc before
 * either forcing speed/duplex (AUTONEG_DISABLE path, which also programs
 * the MAC and powers the PHY up on non-10G parts — see PR 5666) or
 * enabling autonegotiation and resetting the PHY.
 */
1457 * t3_link_start - apply link configuration to MAC/PHY
1458 * @phy: the PHY to setup
1459 * @mac: the MAC to setup
1460 * @lc: the requested link configuration
1462 * Set up a port's MAC and PHY according to a desired link configuration.
1463 * - If the PHY can auto-negotiate first decide what to advertise, then
1464 * enable/disable auto-negotiation as desired, and reset.
1465 * - If the PHY does not auto-negotiate just reset it.
1466 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1467 * otherwise do it later based on the outcome of auto-negotiation.
1469 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1471 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1474 if (lc->supported & SUPPORTED_Autoneg) {
1475 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1477 lc->advertising |= ADVERTISED_Asym_Pause;
1479 lc->advertising |= ADVERTISED_Pause;
1481 phy->ops->advertise(phy, lc->advertising);
1483 if (lc->autoneg == AUTONEG_DISABLE) {
1484 lc->speed = lc->requested_speed;
1485 lc->duplex = lc->requested_duplex;
1486 lc->fc = (unsigned char)fc;
1487 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1489 /* Also disables autoneg */
1490 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1491 /* PR 5666. Power phy up when doing an ifup */
1492 if (!is_10G(phy->adapter))
1493 phy->ops->power_down(phy, 0);
1495 phy->ops->autoneg_enable(phy);
1497 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1498 lc->fc = (unsigned char)fc;
1499 phy->ops->reset(phy, 0);
/*
 * t3_set_vlan_accel: toggle hardware VLAN-tag extraction per port by
 * setting/clearing the per-port bits in the VLANEXTRACTIONENABLE field of
 * A_TP_OUT_CONFIG. @ports is a bitmap, one bit per port.
 */
1505 * t3_set_vlan_accel - control HW VLAN extraction
1506 * @adapter: the adapter
1507 * @ports: bitmap of adapter ports to operate on
1508 * @on: enable (1) or disable (0) HW VLAN extraction
1510 * Enables or disables HW extraction of VLAN tags for the given port.
1512 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1514 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1515 ports << S_VLANEXTRACTIONENABLE,
1516 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/*
 * Fields of the intr_info table entries consumed by
 * t3_handle_intr_status(); a mask of 0 terminates a table.
 * NOTE(review): the struct's opening/closing lines are elided here.
 */
1520 unsigned int mask; /* bits to check in interrupt status */
1521 const char *msg; /* message to print or NULL */
1522 short stat_idx; /* stat counter to increment or -1 */
1523 unsigned short fatal; /* whether the condition reported is fatal */
/*
 * t3_handle_intr_status: generic table-driven interrupt dispatcher used by
 * all the per-module handlers below. Reads @reg, masks it, and for every
 * matching table entry logs (CH_ALERT for fatal, CH_WARN otherwise) and
 * bumps the stat counter when stat_idx >= 0. Finally writes the processed
 * status back to @reg to clear it. Returns the count of fatal conditions
 * seen (per the header comment; the return/accumulation lines are elided).
 */
1527 * t3_handle_intr_status - table driven interrupt handler
1528 * @adapter: the adapter that generated the interrupt
1529 * @reg: the interrupt status register to process
1530 * @mask: a mask to apply to the interrupt status
1531 * @acts: table of interrupt actions
1532 * @stats: statistics counters tracking interrupt occurences
1534 * A table driven interrupt handler that applies a set of masks to an
1535 * interrupt status word and performs the corresponding actions if the
1536 * interrupts described by the mask have occured. The actions include
1537 * optionally printing a warning or alert message, and optionally
1538 * incrementing a stat counter. The table is terminated by an entry
1539 * specifying mask 0. Returns the number of fatal interrupt conditions.
1541 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1543 const struct intr_info *acts,
1544 unsigned long *stats)
1547 unsigned int status = t3_read_reg(adapter, reg) & mask;
1549 for ( ; acts->mask; ++acts) {
1550 if (!(status & acts->mask)) continue;
1553 CH_ALERT(adapter, "%s (0x%x)\n",
1554 acts->msg, status & acts->mask);
1555 } else if (acts->msg)
1556 CH_WARN(adapter, "%s (0x%x)\n",
1557 acts->msg, status & acts->mask);
1558 if (acts->stat_idx >= 0)
1559 stats[acts->stat_idx]++;
1561 if (status) /* clear processed interrupts */
1562 t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt enable/cause masks used by the handlers and by
 * t3_intr_enable() below. Commented-out MSIXPARERR terms are deliberately
 * excluded from the PCI-X/PCIe masks (see the handler tables, which still
 * list them for reporting). Several continuation lines are elided in this
 * chunk; no comments are inserted between '\'-continued lines.
 */
1566 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1567 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1568 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1569 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1570 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1571 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1573 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1574 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1576 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1577 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1578 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1580 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1581 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1582 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1583 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1584 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1585 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1586 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1587 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1588 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1589 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1590 F_TXPARERR | V_BISTERR(M_BISTERR))
1591 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1592 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1593 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1594 #define ULPTX_INTR_MASK 0xfc
1595 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1596 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1597 F_ZERO_SWITCH_ERROR)
1598 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1599 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1600 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1601 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1602 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1603 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1604 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1605 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1606 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1607 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1608 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1609 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1610 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1611 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1612 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1613 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1614 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1615 V_MCAPARERRENB(M_MCAPARERRENB))
1616 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1617 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1618 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1619 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1620 F_MPS0 | F_CPL_SWITCH)
/*
 * pci_intr_handler: decode A_PCIX_INT_CAUSE via the table below; only
 * correctable ECC errors are non-fatal (counted in STAT_PCI_CORR_ECC);
 * any fatal entry triggers t3_fatal_err().
 */
1622 * Interrupt handler for the PCIX1 module.
1624 static void pci_intr_handler(adapter_t *adapter)
1626 static struct intr_info pcix1_intr_info[] = {
1627 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1628 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1629 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1630 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1631 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1632 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1633 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1634 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1635 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1637 { F_DETCORECCERR, "PCI correctable ECC error",
1638 STAT_PCI_CORR_ECC, 0 },
1639 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1640 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1641 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1643 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1645 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1647 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1652 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1653 pcix1_intr_info, adapter->irq_stats))
1654 t3_fatal_err(adapter);
/*
 * pcie_intr_handler: PCIe flavor of pci_intr_handler(). Additionally dumps
 * the PEX error code register when F_PEXERR is set, then runs the table;
 * every listed condition is fatal.
 */
1658 * Interrupt handler for the PCIE module.
1660 static void pcie_intr_handler(adapter_t *adapter)
1662 static struct intr_info pcie_intr_info[] = {
1663 { F_PEXERR, "PCI PEX error", -1, 1 },
1665 "PCI unexpected split completion DMA read error", -1, 1 },
1667 "PCI unexpected split completion DMA command error", -1, 1 },
1668 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1669 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1670 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1671 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1672 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1673 "PCI MSI-X table/PBA parity error", -1, 1 },
1674 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1675 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1676 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1677 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1678 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1682 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1683 CH_ALERT(adapter, "PEX error code 0x%x\n",
1684 t3_read_reg(adapter, A_PCIE_PEX_ERR))
1686 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1687 pcie_intr_info, adapter->irq_stats))
1688 t3_fatal_err(adapter);
/*
 * tp_intr_handler: TP (protocol engine) interrupts. T3C parts widened the
 * parity-error field and use named flags for the page-exhaustion bits, so a
 * revision check picks between the two tables; all entries are fatal.
 */
1692 * TP interrupt handler.
1694 static void tp_intr_handler(adapter_t *adapter)
1696 static struct intr_info tp_intr_info[] = {
1697 { 0xffffff, "TP parity error", -1, 1 },
1698 { 0x1000000, "TP out of Rx pages", -1, 1 },
1699 { 0x2000000, "TP out of Tx pages", -1, 1 },
1702 static struct intr_info tp_intr_info_t3c[] = {
1703 { 0x1fffffff, "TP parity error", -1, 1 },
1704 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1705 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1709 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1710 adapter->params.rev < T3_REV_C ?
1711 tp_intr_info : tp_intr_info_t3c, NULL))
1712 t3_fatal_err(adapter);
/*
 * cim_intr_handler: CIM (firmware processor interface) interrupts —
 * address-range violations, illegal flash/boot/CTL/PL space accesses, and
 * cache/queue/tag parity errors. All fatal.
 */
1716 * CIM interrupt handler.
1718 static void cim_intr_handler(adapter_t *adapter)
1720 static struct intr_info cim_intr_info[] = {
1721 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1722 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1723 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1724 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1725 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1726 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1727 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1728 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1729 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1730 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1731 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1732 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1733 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1734 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1735 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1736 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1737 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1738 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1739 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1740 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1741 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1742 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1743 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1744 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1748 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1749 cim_intr_info, NULL))
1750 t3_fatal_err(adapter);
/*
 * ulprx_intr_handler: ULP RX parity and framing errors; all fatal.
 */
1754 * ULP RX interrupt handler.
1756 static void ulprx_intr_handler(adapter_t *adapter)
1758 static struct intr_info ulprx_intr_info[] = {
1759 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1760 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1761 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1762 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1763 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1764 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1765 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1766 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1770 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1771 ulprx_intr_info, NULL))
1772 t3_fatal_err(adapter);
/*
 * ulptx_intr_handler: ULP TX interrupts. PBL out-of-bounds conditions are
 * non-fatal and only counted (STAT_ULP_CH*_PBL_OOB); the 0xfc parity bits
 * (== ULPTX_INTR_MASK) are fatal.
 */
1776 * ULP TX interrupt handler.
1778 static void ulptx_intr_handler(adapter_t *adapter)
1780 static struct intr_info ulptx_intr_info[] = {
1781 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1782 STAT_ULP_CH0_PBL_OOB, 0 },
1783 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1784 STAT_ULP_CH1_PBL_OOB, 0 },
1785 { 0xfc, "ULP TX parity error", -1, 1 },
1789 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1790 ulptx_intr_info, adapter->irq_stats))
1791 t3_fatal_err(adapter);
/*
 * PM TX framing-error aggregates and handler: ICSPI/OESPI framing errors
 * across both channels, plus 0-length pcmd and SPI parity errors — all
 * fatal. (The macros also feed PMTX_INTR_MASK above.)
 */
1794 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1795 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1796 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1797 F_ICSPI1_TX_FRAMING_ERROR)
1798 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1799 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1800 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1801 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1804 * PM TX interrupt handler.
1806 static void pmtx_intr_handler(adapter_t *adapter)
1808 static struct intr_info pmtx_intr_info[] = {
1809 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1810 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1811 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1812 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1813 "PMTX ispi parity error", -1, 1 },
1814 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1815 "PMTX ospi parity error", -1, 1 },
1819 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1820 pmtx_intr_info, NULL))
1821 t3_fatal_err(adapter);
/*
 * PM RX counterpart of the PM TX block above: IESPI/OCSPI framing-error
 * aggregates and a handler whose entries are all fatal.
 */
1824 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1825 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1826 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1827 F_IESPI1_TX_FRAMING_ERROR)
1828 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1829 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1830 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1831 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1834 * PM RX interrupt handler.
1836 static void pmrx_intr_handler(adapter_t *adapter)
1838 static struct intr_info pmrx_intr_info[] = {
1839 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1840 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1841 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1842 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1843 "PMRX ispi parity error", -1, 1 },
1844 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1845 "PMRX ospi parity error", -1, 1 },
1849 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1850 pmrx_intr_info, NULL))
1851 t3_fatal_err(adapter);
/*
 * cplsw_intr_handler: CPL switch parity/framing/overflow errors; all fatal.
 */
1855 * CPL switch interrupt handler.
1857 static void cplsw_intr_handler(adapter_t *adapter)
1859 static struct intr_info cplsw_intr_info[] = {
1860 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1861 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1862 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1863 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1864 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1865 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1869 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1870 cplsw_intr_info, NULL))
1871 t3_fatal_err(adapter);
/*
 * mps_intr_handler: any of the low 9 MPS cause bits is a fatal parity error.
 */
1875 * MPS interrupt handler.
1877 static void mps_intr_handler(adapter_t *adapter)
1879 static struct intr_info mps_intr_info[] = {
1880 { 0x1ff, "MPS parity error", -1, 1 },
1884 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1885 mps_intr_info, NULL))
1886 t3_fatal_err(adapter);
/*
 * mc7_intr_handler: memory-controller (MC7) interrupts for one of the
 * pmrx/pmtx/cm instances. Correctable errors are only warned and counted;
 * uncorrectable, parity and address errors are alerted and — being in
 * MC7_INTR_FATAL — escalate to t3_fatal_err(). Address errors read the
 * error address register only on rev > 0 silicon. The cause register is
 * cleared last. NOTE(review): the cause-bit tests (F_CE/F_UE/... if-lines)
 * are elided in this chunk.
 */
1889 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1892 * MC7 interrupt handler.
1894 static void mc7_intr_handler(struct mc7 *mc7)
1896 adapter_t *adapter = mc7->adapter;
1897 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1900 mc7->stats.corr_err++;
1901 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1902 "data 0x%x 0x%x 0x%x\n", mc7->name,
1903 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1904 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1905 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1906 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1910 mc7->stats.uncorr_err++;
1911 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1912 "data 0x%x 0x%x 0x%x\n", mc7->name,
1913 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1914 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1915 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1916 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1920 mc7->stats.parity_err++;
1921 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1922 mc7->name, G_PE(cause));
1928 if (adapter->params.rev > 0)
1929 addr = t3_read_reg(adapter,
1930 mc7->offset + A_MC7_ERR_ADDR);
1931 mc7->stats.addr_err++;
1932 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1936 if (cause & MC7_INTR_FATAL)
1937 t3_fatal_err(adapter);
1939 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
/*
 * mac_intr_handler: XGMAC interrupt for MAC @idx. Maps MAC index to port
 * index (MAC 1 maps to nports0), reads the cause with F_RXFIFO_OVERFLOW
 * masked out (polled elsewhere, per the comment), bumps the relevant
 * per-MAC stats, and on F_XGM_INT marks the port LF_MAYBE so
 * t3_link_changed() runs link-fault detection. TX/RX FIFO parity errors
 * (XGM_INTR_FATAL) are fatal. NOTE(review): the local declarations and
 * the return value / fatal-path lines are elided in this chunk.
 */
1942 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1943 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1945 * XGMAC interrupt handler.
1947 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1950 struct port_info *pi;
1953 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1954 pi = adap2pinfo(adap, idx);
1958 * We mask out interrupt causes for which we're not taking interrupts.
1959 * This allows us to use polling logic to monitor some of the other
1960 * conditions when taking interrupts would impose too much load on the
1963 cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
1964 & ~(F_RXFIFO_OVERFLOW));
1966 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1967 mac->stats.tx_fifo_parity_err++;
1968 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1970 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1971 mac->stats.rx_fifo_parity_err++;
1972 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1974 if (cause & F_TXFIFO_UNDERRUN)
1975 mac->stats.tx_fifo_urun++;
1976 if (cause & F_RXFIFO_OVERFLOW)
1977 mac->stats.rx_fifo_ovfl++;
1978 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1979 mac->stats.serdes_signal_loss++;
1980 if (cause & F_XAUIPCSCTCERR)
1981 mac->stats.xaui_pcs_ctc_err++;
1982 if (cause & F_XAUIPCSALIGNCHANGE)
1983 mac->stats.xaui_pcs_align_change++;
1984 if (cause & F_XGM_INT) {
1985 t3_set_reg_field(adap,
1986 A_XGM_INT_ENABLE + mac->offset,
1989 /* link fault suspected */
1990 pi->link_fault = LF_MAYBE;
1993 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1995 if (cause & XGM_INTR_FATAL)
/*
 * t3_phy_intr_handler: dispatch PHY (GPIO) interrupts. For each port whose
 * PHY supports interrupts and whose GPIO bit is set in the T3DBG cause,
 * invoke the PHY's own handler and fan out its result: link change ->
 * t3_link_changed(), fifo error -> counter, module change ->
 * t3_os_phymod_changed(). The processed cause bits are written back to
 * clear them.
 */
2002 * Interrupt handler for PHY events.
2004 int t3_phy_intr_handler(adapter_t *adapter)
2006 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2008 for_each_port(adapter, i) {
2009 struct port_info *p = adap2pinfo(adapter, i);
2011 if (!(p->phy.caps & SUPPORTED_IRQ))
2014 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2015 int phy_cause = p->phy.ops->intr_handler(&p->phy);
2017 if (phy_cause & cphy_cause_link_change)
2018 t3_link_changed(adapter, i);
2019 if (phy_cause & cphy_cause_fifo_error)
2020 p->phy.fifo_errors++;
2021 if (phy_cause & cphy_cause_module_change)
2022 t3_os_phymod_changed(adapter, i);
2026 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
/*
 * t3_slow_intr_handler: top-level dispatcher for non-data ("slow")
 * interrupts. Reads PL_INT_CAUSE0, masks with slow_intr_mask, routes each
 * set bit to the matching per-module handler above, then clears the
 * processed bits and flushes the write with a read-back.
 */
2031 * t3_slow_intr_handler - control path interrupt handler
2032 * @adapter: the adapter
2034 * T3 interrupt handler for non-data interrupt events, e.g., errors.
2035 * The designation 'slow' is because it involves register reads, while
2036 * data interrupts typically don't involve any MMIOs.
2038 int t3_slow_intr_handler(adapter_t *adapter)
2040 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2042 cause &= adapter->slow_intr_mask;
2045 if (cause & F_PCIM0) {
2046 if (is_pcie(adapter))
2047 pcie_intr_handler(adapter);
2049 pci_intr_handler(adapter);
2052 t3_sge_err_intr_handler(adapter);
2053 if (cause & F_MC7_PMRX)
2054 mc7_intr_handler(&adapter->pmrx);
2055 if (cause & F_MC7_PMTX)
2056 mc7_intr_handler(&adapter->pmtx);
2057 if (cause & F_MC7_CM)
2058 mc7_intr_handler(&adapter->cm);
2060 cim_intr_handler(adapter);
2062 tp_intr_handler(adapter);
2063 if (cause & F_ULP2_RX)
2064 ulprx_intr_handler(adapter);
2065 if (cause & F_ULP2_TX)
2066 ulptx_intr_handler(adapter);
2067 if (cause & F_PM1_RX)
2068 pmrx_intr_handler(adapter);
2069 if (cause & F_PM1_TX)
2070 pmtx_intr_handler(adapter);
2071 if (cause & F_CPL_SWITCH)
2072 cplsw_intr_handler(adapter);
2074 mps_intr_handler(adapter);
2076 t3_mc5_intr_handler(&adapter->mc5);
2077 if (cause & F_XGMAC0_0)
2078 mac_intr_handler(adapter, 0);
2079 if (cause & F_XGMAC0_1)
2080 mac_intr_handler(adapter, 1);
2081 if (cause & F_T3DBG)
2082 t3_os_ext_intr_handler(adapter);
2084 /* Clear the interrupts just processed. */
2085 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2086 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/*
 * calc_gpio_intr: build the T3DBG GPIO interrupt-enable mask from the
 * ports whose PHYs support interrupts and have a nonzero GPIO line.
 */
2090 static unsigned int calc_gpio_intr(adapter_t *adap)
2092 unsigned int i, gpi_intr = 0;
2094 for_each_port(adap, i)
2095 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2096 adapter_info(adap)->gpio_intr[i])
2097 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
/*
 * t3_intr_enable: program every module's interrupt-enable register (bulk
 * writes via the addr/val table, plus TP/SGE/CPL/ULPTX with rev-dependent
 * masks, GPIO, and PCI-X vs PCIe), then arm the top-level concentrator
 * (PL_INT_ENABLE0 = slow_intr_mask) and flush with a read-back. rev > 0
 * parts additionally enable CIM overflow and ULPTX PBL-bound errors.
 */
2102 * t3_intr_enable - enable interrupts
2103 * @adapter: the adapter whose interrupts should be enabled
2105 * Enable interrupts by setting the interrupt enable registers of the
2106 * various HW modules and then enabling the top-level interrupt
2109 void t3_intr_enable(adapter_t *adapter)
2111 static struct addr_val_pair intr_en_avp[] = {
2112 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
2113 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2115 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2117 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2118 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2119 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2120 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2121 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2122 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
2125 adapter->slow_intr_mask = PL_INTR_MASK;
2127 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2128 t3_write_reg(adapter, A_TP_INT_ENABLE,
2129 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2130 t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2132 if (adapter->params.rev > 0) {
2133 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2134 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2135 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2136 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2137 F_PBL_BOUND_ERR_CH1);
2139 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2140 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2143 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2145 if (is_pcie(adapter))
2146 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2148 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2149 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2150 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
/*
 * t3_intr_disable: mask only the top-level concentrator (module enables
 * stay programmed), flush, and remember that nothing is armed by zeroing
 * slow_intr_mask.
 */
2154 * t3_intr_disable - disable a card's interrupts
2155 * @adapter: the adapter whose interrupts should be disabled
2157 * Disable interrupts. We only disable the top-level interrupt
2158 * concentrator and the SGE data interrupts.
2160 void t3_intr_disable(adapter_t *adapter)
2162 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2163 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2164 adapter->slow_intr_mask = 0;
/*
 * t3_intr_clear: write all-ones to every module's INT_CAUSE register (the
 * table is partially elided in this chunk), clear per-port PHY/MAC
 * interrupts, the PCIe PEX error register when applicable, and finally the
 * top-level cause, flushing with a read-back.
 */
2168 * t3_intr_clear - clear all interrupts
2169 * @adapter: the adapter whose interrupts should be cleared
2171 * Clears all interrupts.
2173 void t3_intr_clear(adapter_t *adapter)
2175 static const unsigned int cause_reg_addr[] = {
2177 A_SG_RSPQ_FL_STATUS,
2180 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2181 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2182 A_CIM_HOST_INT_CAUSE,
2195 /* Clear PHY and MAC interrupts for each port. */
2196 for_each_port(adapter, i)
2197 t3_port_intr_clear(adapter, i);
2199 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2200 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2202 if (is_pcie(adapter))
2203 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2204 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2205 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/*
 * t3_xgm_intr_enable: arm the extra XGM interrupt (link-fault change) for
 * port @idx via the per-port XGM_XGM_INT_ENABLE register.
 */
2208 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2210 struct port_info *pi = adap2pinfo(adapter, idx);
2212 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2213 XGM_EXTRA_INTR_MASK);
/*
 * t3_xgm_intr_disable: counterpart of t3_xgm_intr_enable(), writing the
 * per-port XGM_XGM_INT_DISABLE register (mask argument elided in chunk).
 */
2216 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2218 struct port_info *pi = adap2pinfo(adapter, idx);
2220 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
/*
 * t3_port_intr_enable: enable MAC (XGM_INTR_MASK) and PHY interrupts for
 * one port.
 */
2225 * t3_port_intr_enable - enable port-specific interrupts
2226 * @adapter: associated adapter
2227 * @idx: index of port whose interrupts should be enabled
2229 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2232 void t3_port_intr_enable(adapter_t *adapter, int idx)
2234 struct port_info *pi = adap2pinfo(adapter, idx);
2236 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2237 pi->phy.ops->intr_enable(&pi->phy);
/*
 * t3_port_intr_disable: mask MAC interrupts (enable = 0) and disable PHY
 * interrupts for one port.
 */
2241 * t3_port_intr_disable - disable port-specific interrupts
2242 * @adapter: associated adapter
2243 * @idx: index of port whose interrupts should be disabled
2245 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2248 void t3_port_intr_disable(adapter_t *adapter, int idx)
2250 struct port_info *pi = adap2pinfo(adapter, idx);
2252 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2253 pi->phy.ops->intr_disable(&pi->phy);
2257 * t3_port_intr_clear - clear port-specific interrupts
2258 * @adapter: associated adapter
2259 * @idx: index of port whose interrupts to clear
2261 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * port.
2264 void t3_port_intr_clear(adapter_t *adapter, int idx)
2266 struct port_info *pi = adap2pinfo(adapter, idx);
/* All-ones write acknowledges every pending MAC interrupt cause bit. */
2268 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2269 pi->phy.ops->intr_clear(&pi->phy);
/* Max polling iterations (1 us apart) for an SGE context command. */
2272 #define SG_CONTEXT_CMD_ATTEMPTS 100
2275 * t3_sge_write_context - write an SGE context
2276 * @adapter: the adapter
2277 * @id: the context id
2278 * @type: the context type
2280 * Program an SGE context with the values already loaded in the
2281 * CONTEXT_DATA? registers.
 *
 * The MASK registers select which bits of the staged DATA values the
 * command actually writes into the context.
2283 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2286 if (type == F_RESPONSEQ) {
2288 * Can't write the Response Queue Context bits for
2289 * Interrupt Armed or the Reserve bits after the chip
2290 * has been initialized out of reset. Writing to these
2291 * bits can confuse the hardware.
 * Hence MASK2 excludes those bits (0x17ffffff) for response queues.
2293 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2294 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2295 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2296 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* All other context types: write every bit. */
2298 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2299 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2300 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2301 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* Issue the write command (opcode 1) and wait for BUSY to deassert. */
2303 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2304 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2305 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2306 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2310 * clear_sge_ctxt - completely clear an SGE context
2311 * @adapter: the adapter
2312 * @id: the context id
2313 * @type: the context type
2315 * Completely clear an SGE context. Used predominantly at post-reset
2316 * initialization. Note in particular that we don't skip writing to any
2317 * "sensitive bits" in the contexts the way that t3_sge_write_context()
 * does.
2320 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
/* Stage zeros in all four DATA words ... */
2322 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2323 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2324 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2325 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
/* ... and write every bit (full masks), unlike t3_sge_write_context(). */
2326 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2327 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2328 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2329 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2330 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2331 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2332 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2333 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2337 * t3_sge_init_ecntxt - initialize an SGE egress context
2338 * @adapter: the adapter to configure
2339 * @id: the context id
2340 * @gts_enable: whether to enable GTS for the context
2341 * @type: the egress context type
2342 * @respq: associated response queue
2343 * @base_addr: base address of queue
2344 * @size: number of queue entries
 * @token: uP token (see V_EC_UP_TOKEN below)
2346 * @gen: initial generation value for the context
2347 * @cidx: consumer pointer
2349 * Initialize an SGE egress context and make it ready for use. If the
2350 * platform allows concurrent context operations, the caller is
2351 * responsible for appropriate locking.
2353 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2354 enum sge_context_type type, int respq, u64 base_addr,
2355 unsigned int size, unsigned int token, int gen,
/* Offload queues start with 0 credits; others get the FW WR quota. */
2358 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2360 if (base_addr & 0xfff) /* must be 4K aligned */
/* Refuse to start a new context op while the previous one is busy. */
2362 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the context image: the 4K-aligned base address is split
 * across DATA1 (low bits), DATA2, and DATA3 (high nibble). */
2366 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2367 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2368 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2369 V_EC_BASE_LO((u32)base_addr & 0xffff));
2371 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2373 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2374 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2375 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2377 return t3_sge_write_context(adapter, id, F_EGRESS);
2381 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2382 * @adapter: the adapter to configure
2383 * @id: the context id
2384 * @gts_enable: whether to enable GTS for the context
2385 * @base_addr: base address of queue
2386 * @size: number of queue entries
2387 * @bsize: size of each buffer for this queue
2388 * @cong_thres: threshold to signal congestion to upstream producers
2389 * @gen: initial generation value for the context
2390 * @cidx: consumer pointer
2392 * Initialize an SGE free list context and make it ready for use. The
2393 * caller is responsible for ensuring only one context operation occurs
 * at a time.
2396 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2397 u64 base_addr, unsigned int size, unsigned int bsize,
2398 unsigned int cong_thres, int gen, unsigned int cidx)
2400 if (base_addr & 0xfff) /* must be 4K aligned */
/* Don't start a new context op while the previous one is busy. */
2402 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the context image; cidx and bsize are each split across two
 * DATA words (…_LO / …_HI fields). */
2406 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2408 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2409 V_FL_BASE_HI((u32)base_addr) |
2410 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2411 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2412 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2413 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2414 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2415 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2416 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2417 return t3_sge_write_context(adapter, id, F_FREELIST);
2421 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2422 * @adapter: the adapter to configure
2423 * @id: the context id
2424 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2425 * @base_addr: base address of queue
2426 * @size: number of queue entries
2427 * @fl_thres: threshold for selecting the normal or jumbo free list
2428 * @gen: initial generation value for the context
2429 * @cidx: consumer pointer
2431 * Initialize an SGE response queue context and make it ready for use.
2432 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
2435 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2436 u64 base_addr, unsigned int size,
2437 unsigned int fl_thres, int gen, unsigned int cidx)
2439 unsigned int ctrl, intr = 0;
2441 if (base_addr & 0xfff) /* must be 4K aligned */
2443 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2447 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2449 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
/* Enable the queue's interrupt for a dedicated MSI-X vector, or for
 * vector 0 only when ONEINTMULTQ (single-interrupt mode) is off. */
2451 ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2452 if ((irq_vec_idx > 0) ||
2453 ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2454 intr = F_RQ_INTR_EN;
2455 if (irq_vec_idx >= 0)
2456 intr |= V_RQ_MSI_VEC(irq_vec_idx);
2457 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2458 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2459 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2460 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2464 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2465 * @adapter: the adapter to configure
2466 * @id: the context id
2467 * @base_addr: base address of queue
2468 * @size: number of queue entries
2469 * @rspq: response queue for async notifications
2470 * @ovfl_mode: CQ overflow mode
2471 * @credits: completion queue credits
2472 * @credit_thres: the credit threshold
2474 * Initialize an SGE completion queue context and make it ready for use.
2475 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
2478 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2479 unsigned int size, int rspq, int ovfl_mode,
2480 unsigned int credits, unsigned int credit_thres)
2482 if (base_addr & 0xfff) /* must be 4K aligned */
2484 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2488 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2489 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
/* Note V_CQ_ERR is set from ovfl_mode as well — any non-zero overflow
 * mode also arms the error reporting bit. */
2491 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2492 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2493 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2494 V_CQ_ERR(ovfl_mode));
2495 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2496 V_CQ_CREDIT_THRES(credit_thres));
2497 return t3_sge_write_context(adapter, id, F_CQ);
2501 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2502 * @adapter: the adapter
2503 * @id: the egress context id
2504 * @enable: enable (1) or disable (0) the context
2506 * Enable or disable an SGE egress context. The caller is responsible for
2507 * ensuring only one context operation occurs at a time.
2509 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2511 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Zero masks for words 0-2 leave those words untouched; only the
 * EC_VALID bit in word 3 is written. */
2514 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2515 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2516 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2517 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2518 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2519 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2520 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2521 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2522 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2526 * t3_sge_disable_fl - disable an SGE free-buffer list
2527 * @adapter: the adapter
2528 * @id: the free list context id
2530 * Disable an SGE free-buffer list. The caller is responsible for
2531 * ensuring only one context operation occurs at a time.
2533 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2535 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Only the FL_SIZE field is written (to 0), which disables the list;
 * all other context bits are masked off and preserved. */
2538 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2539 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2540 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2541 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2542 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2543 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2544 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2545 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2546 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2550 * t3_sge_disable_rspcntxt - disable an SGE response queue
2551 * @adapter: the adapter
2552 * @id: the response queue context id
2554 * Disable an SGE response queue. The caller is responsible for
2555 * ensuring only one context operation occurs at a time.
2557 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2559 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Write only the CQ_SIZE field, setting it to 0 to disable the queue. */
2562 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2563 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2564 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2565 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2566 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2567 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2568 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2569 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2570 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2574 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2575 * @adapter: the adapter
2576 * @id: the completion queue context id
2578 * Disable an SGE completion queue. The caller is responsible for
2579 * ensuring only one context operation occurs at a time.
2581 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2583 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Same pattern as t3_sge_disable_rspcntxt(): zero only CQ_SIZE. */
2586 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2587 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2588 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2589 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2590 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2591 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2592 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2593 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2594 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2598 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2599 * @adapter: the adapter
2600 * @id: the context id
2601 * @op: the operation to perform
2602 * @credits: credits to return to the CQ
2604 * Perform the selected operation on an SGE completion queue context.
2605 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
2608 * For most operations the function returns the current HW position in
2609 * the completion queue.
2611 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2612 unsigned int credits)
2616 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits ride in the upper half of DATA0 for CQ operations. */
2619 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16)
2620 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2621 V_CONTEXT(id) | F_CQ);
2622 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2623 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* Ops 2..6 return the CQ index. On rev 0 silicon the command result
 * doesn't carry it, so issue an explicit context read (opcode 0) and
 * extract the index from DATA0 instead. */
2626 if (op >= 2 && op < 7) {
2627 if (adapter->params.rev > 0)
2628 return G_CQ_INDEX(val);
2630 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2631 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2632 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2633 F_CONTEXT_CMD_BUSY, 0,
2634 SG_CONTEXT_CMD_ATTEMPTS, 1))
2636 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2642 * t3_sge_read_context - read an SGE context
2643 * @type: the context type
2644 * @adapter: the adapter
2645 * @id: the context id
2646 * @data: holds the retrieved context
2648 * Read an SGE egress context. The caller is responsible for ensuring
2649 * only one context operation occurs at a time.
2651 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2652 unsigned int id, u32 data[4])
2654 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = read; the context image lands in the four DATA regs. */
2657 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2658 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2659 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2660 SG_CONTEXT_CMD_ATTEMPTS, 1))
2662 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2663 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2664 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2665 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2670 * t3_sge_read_ecntxt - read an SGE egress context
2671 * @adapter: the adapter
2672 * @id: the context id
2673 * @data: holds the retrieved context
2675 * Read an SGE egress context. The caller is responsible for ensuring
2676 * only one context operation occurs at a time.
 *
 * Thin typed wrapper over t3_sge_read_context(); an id range check is
 * present in the full source but not visible in this excerpt.
2678 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2682 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2686 * t3_sge_read_cq - read an SGE CQ context
2687 * @adapter: the adapter
2688 * @id: the context id
2689 * @data: holds the retrieved context
2691 * Read an SGE CQ context. The caller is responsible for ensuring
2692 * only one context operation occurs at a time.
 *
 * Thin typed wrapper over t3_sge_read_context().
2694 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2698 return t3_sge_read_context(F_CQ, adapter, id, data);
2702 * t3_sge_read_fl - read an SGE free-list context
2703 * @adapter: the adapter
2704 * @id: the context id
2705 * @data: holds the retrieved context
2707 * Read an SGE free-list context. The caller is responsible for ensuring
2708 * only one context operation occurs at a time.
2710 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
/* Two free lists (normal + jumbo) per queue set. */
2712 if (id >= SGE_QSETS * 2)
2714 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2718 * t3_sge_read_rspq - read an SGE response queue context
2719 * @adapter: the adapter
2720 * @id: the context id
2721 * @data: holds the retrieved context
2723 * Read an SGE response queue context. The caller is responsible for
2724 * ensuring only one context operation occurs at a time.
2726 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2728 if (id >= SGE_QSETS)
2730 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2734 * t3_config_rss - configure Rx packet steering
2735 * @adapter: the adapter
2736 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2737 * @cpus: values for the CPU lookup table (0xff terminated)
2738 * @rspq: values for the response queue lookup table (0xffff terminated)
2740 * Programs the receive packet steering logic. @cpus and @rspq provide
2741 * the values for the CPU and response queue lookup tables. If they
2742 * provide fewer values than the size of the tables the supplied values
2743 * are used repeatedly until the tables are fully populated.
2745 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2748 int i, j, cpu_idx = 0, q_idx = 0;
/* Fill the CPU lookup table; each entry packs two 6-bit CPU values.
 * On hitting the 0xff sentinel the index wraps to reuse the list. */
2751 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2754 for (j = 0; j < 2; ++j) {
2755 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2756 if (cpus[cpu_idx] == 0xff)
2759 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Fill the response queue map table; 0xffff terminates/wraps @rspq. */
2763 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2764 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2765 (i << 16) | rspq[q_idx++]);
2766 if (rspq[q_idx] == 0xffff)
/* Finally enable/configure RSS as requested. */
2770 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2774 * t3_read_rss - read the contents of the RSS tables
2775 * @adapter: the adapter
2776 * @lkup: holds the contents of the RSS lookup table
2777 * @map: holds the contents of the RSS map table
2779 * Reads the contents of the receive packet steering tables.
 *
 * Returns 0 on success; bails out early if the hardware does not set
 * the valid bit (bit 31) on a table read-back.
2781 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2787 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2788 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2790 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2791 if (!(val & 0x80000000))
/* Each lookup entry yields two bytes; the second is bits 15:8. */
2794 *lkup++ = (u8)(val >> 8);
2798 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2799 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2801 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2802 if (!(val & 0x80000000))
2810 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2811 * @adap: the adapter
2812 * @enable: 1 to select offload mode, 0 for regular NIC
2814 * Switches TP to NIC/offload mode.
 *
 * Offload mode is only entered on offload-capable adapters; switching
 * back to NIC mode is always allowed.
2816 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2818 if (is_offload(adap) || !enable)
/* NICMODE bit is the inverse of "offload enabled". */
2819 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2820 V_NICMODE(!enable));
2824 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2825 * @adap: the adapter
2826 * @addr: the indirect TP register address
2827 * @mask: specifies the field within the register to modify
2828 * @val: new value for the field
2830 * Sets a field of an indirect TP register to the given value.
 *
 * Read-modify-write through the TP_PIO_ADDR/TP_PIO_DATA indirection
 * window; not safe against concurrent users of the PIO window.
2832 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2833 unsigned int mask, unsigned int val)
2835 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2836 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2837 t3_write_reg(adap, A_TP_PIO_DATA, val);
2841 * t3_enable_filters - enable the HW filters
2842 * @adap: the adapter
2844 * Enables the HW filters for NIC traffic.
 *
 * Leaves NIC-only mode (clears NICMODE), turns on the MC5 filter
 * engine, and enables 5-tuple lookup on every packet.
2846 void t3_enable_filters(adapter_t *adap)
2848 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
2849 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
2850 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
2851 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
2855 * t3_disable_filters - disable the HW filters
2856 * @adap: the adapter
2858 * Disables the HW filters for NIC traffic.
 *
 * Mirror of t3_enable_filters(), except NICMODE is deliberately left
 * untouched (see comment below).
2860 void t3_disable_filters(adapter_t *adap)
2862 /* note that we don't want to revert to NIC-only mode */
2863 t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
2864 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
2865 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
2866 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
2870 * pm_num_pages - calculate the number of pages of the payload memory
2871 * @mem_size: the size of the payload memory
2872 * @pg_size: the size of each payload memory page
2874 * Calculate the number of pages, each of the given size, that fit in a
2875 * memory of the specified size, respecting the HW requirement that the
2876 * number of pages must be a multiple of 24.
2878 static inline unsigned int pm_num_pages(unsigned int mem_size,
2879 unsigned int pg_size)
2881 unsigned int n = mem_size / pg_size;
/* (rounding of n down to a multiple of 24 is not visible in this
 * excerpt — see the full source) */
/* Helper macro used by partition_mem(): programs a region's base
 * register and advances the running base pointer `start` by `size`
 * (the remainder of the macro body is not visible in this excerpt). */
2886 #define mem_region(adap, start, size, reg) \
2887 t3_write_reg((adap), A_ ## reg, (start)); \
2891 * partition_mem - partition memory and configure TP memory settings
2892 * @adap: the adapter
2893 * @p: the TP parameters
2895 * Partitions context and payload memory and configures TP's memory
 * registers.
2898 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2900 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2901 unsigned int timers = 0, timers_shift = 22;
/* Post-rev-0 chips scale the timer region with the TID count
 * (the per-tier assignments are not visible in this excerpt). */
2903 if (adap->params.rev > 0) {
2904 if (tids <= 16 * 1024) {
2907 } else if (tids <= 64 * 1024) {
2910 } else if (tids <= 256 * 1024) {
/* Per-channel Rx/Tx payload memory split. */
2916 t3_write_reg(adap, A_TP_PMM_SIZE,
2917 p->chan_rx_size | (p->chan_tx_size >> 16));
2919 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2920 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2921 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
/* TXDATAACKIDX is log2(tx page size) - 12 (pages are >= 4KB). */
2922 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2923 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2925 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2926 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2927 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2929 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2930 /* Add a bit of headroom and make multiple of 24 */
2932 pstructs -= pstructs % 24;
2933 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out context memory: TCBs first, then egress/CQ contexts,
 * timers, pstructs and the free-list regions, in order; `m` tracks
 * the running base address. */
2935 m = tids * TCB_SIZE;
2936 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2937 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2938 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2939 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2940 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2941 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2942 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2943 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* CIM SDRAM gets whatever is left, 4KB-aligned. */
2945 m = (m + 4095) & ~0xfff;
2946 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2947 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Recompute how many TIDs the remaining CM space can support and
 * give any surplus MC5 entries to the server region. */
2949 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2950 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2951 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2953 adap->params.mc5.nservers += m - tids;
/* Write an indirect TP register through the PIO address/data window. */
2956 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2958 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2959 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* Read an indirect TP register through the PIO address/data window. */
2962 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
2964 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2965 return t3_read_reg(adap, A_TP_PIO_DATA);
/*
 * tp_config - one-time static configuration of the TP block
 * @adap: the adapter
 * @p: TP parameters (presently unused in the visible portion)
 *
 * Programs TP global options, TCP option handling, delayed-ACK policy,
 * pacing, congestion behavior, and (for >2-port adapters) the extra
 * port-steering configuration.
 */
2968 static void tp_config(adapter_t *adap, const struct tp_params *p)
2970 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2971 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2972 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2973 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2974 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2975 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2976 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2977 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2978 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2979 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2980 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2981 F_IPV6ENABLE | F_NICMODE);
2982 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2983 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2984 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2985 adap->params.rev > 0 ? F_ENABLEESND :
2987 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2989 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2990 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2991 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2992 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2993 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* NOTE(review): two consecutive writes to the same register — the
 * second (1000) is the value that sticks; the first may be a required
 * programming sequence. Confirm against the T3 documentation. */
2994 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2995 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev-dependent pacing setup: auto pacing on newer silicon, fixed
 * pacing on rev 0. */
2997 if (adap->params.rev > 0) {
2998 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2999 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3000 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3001 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3002 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3003 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3004 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3006 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3008 if (adap->params.rev == T3_REV_C)
3009 t3_set_reg_field(adap, A_TP_PC_CONFIG,
3010 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3011 V_TABLELATENCYDELTA(4));
3013 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3014 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3015 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3016 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
/* Extra steering configuration needed only on 4-port adapters. */
3018 if (adap->params.nports > 2) {
3019 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3020 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3021 F_ENABLERXPORTFROMADDR);
3022 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3023 V_RXMAPMODE(M_RXMAPMODE), 0);
3024 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3025 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3026 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3027 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3028 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3029 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3030 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3034 /* TCP timer values in ms */
3035 #define TP_DACK_TIMER 50
3036 #define TP_RTO_MIN 250
3039 * tp_set_timers - set TP timing parameters
3040 * @adap: the adapter to set
3041 * @core_clk: the core clock frequency in Hz
3043 * Set TP's timing parameters, such as the various timer resolutions and
3044 * the TCP timer values.
3046 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3048 unsigned int tre = adap->params.tp.tre;
3049 unsigned int dack_re = adap->params.tp.dack_re;
3050 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps = TP timer ticks per second = core_clk >> timer resolution. */
3051 unsigned int tps = core_clk >> tre;
3053 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3054 V_DELAYEDACKRESOLUTION(dack_re) |
3055 V_TIMESTAMPRESOLUTION(tstamp_re));
/* Convert TP_DACK_TIMER (ms) into delayed-ACK resolution ticks. */
3056 t3_write_reg(adap, A_TP_DACK_TIMER,
3057 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff shift table: entry i holds shift i. */
3058 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3059 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3060 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3061 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3062 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3063 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3064 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* `N SECONDS` expands to `N * tps`, i.e. N seconds in timer ticks. */
3067 #define SECONDS * tps
3069 t3_write_reg(adap, A_TP_MSL,
3070 adap->params.rev > 0 ? 0 : 2 SECONDS);
3071 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3072 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3073 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3074 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3075 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3076 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3077 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3078 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3083 #ifdef CONFIG_CHELSIO_T3_CORE
3085 * t3_tp_set_coalescing_size - set receive coalescing size
3086 * @adap: the adapter
3087 * @size: the receive coalescing size
3088 * @psh: whether a set PSH bit should deliver coalesced data
3090 * Set the receive coalescing size and PSH bit handling.
 * A @size of 0 disables coalescing entirely. Returns non-zero if
 * @size exceeds MAX_RX_COALESCING_LEN.
3092 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3096 if (size > MAX_RX_COALESCING_LEN)
3099 val = t3_read_reg(adap, A_TP_PARA_REG3);
3100 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3103 val |= F_RXCOALESCEENABLE;
3105 val |= F_RXCOALESCEPSHEN;
3106 size = min(MAX_RX_COALESCING_LEN, size);
3107 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3108 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3110 t3_write_reg(adap, A_TP_PARA_REG3, val);
3115 * t3_tp_set_max_rxsize - set the max receive size
3116 * @adap: the adapter
3117 * @size: the max receive size
3119 * Set TP's max receive size. This is the limit that applies when
3120 * receive coalescing is disabled.
3122 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
/* Same limit for both PM transfer-length channels. */
3124 t3_write_reg(adap, A_TP_PARA_REG7,
3125 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/*
 * init_mtus - populate the default MTU table
 * @mtus: the array to fill
 *
 * (The MTU values themselves are not visible in this excerpt.)
 */
3128 static void __devinit init_mtus(unsigned short mtus[])
3131 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
3132 * it can accomodate max size TCP/IP headers when SACK and timestamps
3133 * are enabled and still have at least 8 bytes of payload.
3154 * init_cong_ctrl - initialize congestion control parameters
3155 * @a: the alpha values for congestion control
3156 * @b: the beta values for congestion control
3158 * Initialize the congestion control parameters.
 * (Entries 9 and up of @a, and parts of @b, are not visible in this
 * excerpt.)
3160 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3162 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3187 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3190 b[13] = b[14] = b[15] = b[16] = 3;
3191 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3192 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3197 /* The minimum additive increment value for the congestion control table */
3198 #define CC_MIN_INCR 2U
3201 * t3_load_mtus - write the MTU and congestion control HW tables
3202 * @adap: the adapter
3203 * @mtus: the unrestricted values for the MTU table
3204 * @alpha: the values for the congestion control alpha parameter
3205 * @beta: the values for the congestion control beta parameter
3206 * @mtu_cap: the maximum permitted effective MTU
3208 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
3209 * Update the high-speed congestion control table with the supplied alpha,
 * beta, and MTUs.
3212 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3213 unsigned short alpha[NCCTRL_WIN],
3214 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, one entry per window size. */
3216 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3217 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3218 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3219 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3223 for (i = 0; i < NMTUS; ++i) {
3224 unsigned int mtu = min(mtus[i], mtu_cap);
3225 unsigned int log2 = fls(mtu);
/* Round log2 up when mtu is closer to the next power of two. */
3227 if (!(mtu & ((1 << log2) >> 2))) /* round */
3229 t3_write_reg(adap, A_TP_MTU_TABLE,
3230 (i << 24) | (log2 << 16) | mtu);
/* Additive increment per window: scaled alpha, floored at
 * CC_MIN_INCR; 40 = fixed TCP/IP header overhead subtracted. */
3232 for (w = 0; w < NCCTRL_WIN; ++w) {
3235 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3238 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3239 (w << 16) | (beta[w] << 13) | inc);
3245 * t3_read_hw_mtus - returns the values in the HW MTU table
3246 * @adap: the adapter
3247 * @mtus: where to store the HW MTU values
3249 * Reads the HW MTU table.
3251 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3255 for (i = 0; i < NMTUS; ++i) {
/* 0xff000000 selects read-back mode; low byte selects the entry. */
3258 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3259 val = t3_read_reg(adap, A_TP_MTU_TABLE);
/* MTU value occupies the low 14 bits of the table entry. */
3260 mtus[i] = val & 0x3fff;
3265 * t3_get_cong_cntl_tab - reads the congestion control table
3266 * @adap: the adapter
3267 * @incr: where to store the alpha values
3269 * Reads the additive increments programmed into the HW congestion
 * control table.
3272 void t3_get_cong_cntl_tab(adapter_t *adap,
3273 unsigned short incr[NMTUS][NCCTRL_WIN])
3275 unsigned int mtu, w;
3277 for (mtu = 0; mtu < NMTUS; ++mtu)
3278 for (w = 0; w < NCCTRL_WIN; ++w) {
/* 0xffff0000 = read-back mode; entry index is (mtu, window). */
3279 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3280 0xffff0000 | (mtu << 5) | w);
/* Increment is the low 13 bits of the table entry. */
3281 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3282 A_TP_CCTRL_TABLE) & 0x1fff;
3287 * t3_tp_get_mib_stats - read TP's MIB counters
3288 * @adap: the adapter
3289 * @tps: holds the returned counter values
3291 * Returns the values of TP's MIB counters.
3293 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3295 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3296 sizeof(*tps) / sizeof(u32), 0);
3300 * t3_read_pace_tbl - read the pace table
3301 * @adap: the adapter
3302 * @pace_vals: holds the returned values
3304 * Returns the values of TP's pace table in nanoseconds.
3306 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3308 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3310 for (i = 0; i < NTX_SCHED; i++) {
3311 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3312 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3317 * t3_set_pace_tbl - set the pace table
3318 * @adap: the adapter
3319 * @pace_vals: the pace values in nanoseconds
3320 * @start: index of the first entry in the HW pace table to set
3321 * @n: how many entries to set
3323 * Sets (a subset of the) HW pace table.
3325 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3326 unsigned int start, unsigned int n)
3328 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3330 for ( ; n; n--, start++, pace_vals++)
3331 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3332 ((*pace_vals + tick_ns / 2) / tick_ns));
3335 #define ulp_region(adap, name, start, len) \
3336 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3337 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3338 (start) + (len) - 1); \
3341 #define ulptx_region(adap, name, start, len) \
3342 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3343 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3344 (start) + (len) - 1)
3346 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3348 unsigned int m = p->chan_rx_size;
3350 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3351 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3352 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3353 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3354 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3355 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3356 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3357 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3362 * t3_set_proto_sram - set the contents of the protocol sram
3363 * @adapter: the adapter
3364 * @data: the protocol image
3366 * Write the contents of the protocol SRAM.
3368 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3371 const u32 *buf = (const u32 *)data;
3373 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3374 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3375 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3376 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3377 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3378 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3380 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3381 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3389 * t3_config_trace_filter - configure one of the tracing filters
3390 * @adapter: the adapter
3391 * @tp: the desired trace filter parameters
3392 * @filter_index: which filter to configure
3393 * @invert: if set non-matching packets are traced instead of matching ones
3394 * @enable: whether to enable or disable the filter
3396 * Configures one of the tracing filters available in HW.
3398 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3399 int filter_index, int invert, int enable)
3401 u32 addr, key[4], mask[4];
3403 key[0] = tp->sport | (tp->sip << 16);
3404 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3406 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3408 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3409 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3410 mask[2] = tp->dip_mask;
3411 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3414 key[3] |= (1 << 29);
3416 key[3] |= (1 << 28);
3418 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3419 tp_wr_indirect(adapter, addr++, key[0]);
3420 tp_wr_indirect(adapter, addr++, mask[0]);
3421 tp_wr_indirect(adapter, addr++, key[1]);
3422 tp_wr_indirect(adapter, addr++, mask[1]);
3423 tp_wr_indirect(adapter, addr++, key[2]);
3424 tp_wr_indirect(adapter, addr++, mask[2]);
3425 tp_wr_indirect(adapter, addr++, key[3]);
3426 tp_wr_indirect(adapter, addr, mask[3]);
3427 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3431 * t3_query_trace_filter - query a tracing filter
3432 * @adapter: the adapter
3433 * @tp: the current trace filter parameters
3434 * @filter_index: which filter to query
3435 * @inverted: non-zero if the filter is inverted
3436 * @enabled: non-zero if the filter is enabled
3438 * Returns the current settings of the specified HW tracing filter.
3440 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3441 int filter_index, int *inverted, int *enabled)
3443 u32 addr, key[4], mask[4];
3445 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3446 key[0] = tp_rd_indirect(adapter, addr++);
3447 mask[0] = tp_rd_indirect(adapter, addr++);
3448 key[1] = tp_rd_indirect(adapter, addr++);
3449 mask[1] = tp_rd_indirect(adapter, addr++);
3450 key[2] = tp_rd_indirect(adapter, addr++);
3451 mask[2] = tp_rd_indirect(adapter, addr++);
3452 key[3] = tp_rd_indirect(adapter, addr++);
3453 mask[3] = tp_rd_indirect(adapter, addr);
3455 tp->sport = key[0] & 0xffff;
3456 tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3457 tp->dport = key[1] >> 16;
3459 tp->proto = key[3] & 0xff;
3460 tp->vlan = key[3] >> 8;
3461 tp->intf = key[3] >> 20;
3463 tp->sport_mask = mask[0] & 0xffff;
3464 tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3465 tp->dport_mask = mask[1] >> 16;
3466 tp->dip_mask = mask[2];
3467 tp->proto_mask = mask[3] & 0xff;
3468 tp->vlan_mask = mask[3] >> 8;
3469 tp->intf_mask = mask[3] >> 20;
3471 *inverted = key[3] & (1 << 29);
3472 *enabled = key[3] & (1 << 28);
3476 * t3_config_sched - configure a HW traffic scheduler
3477 * @adap: the adapter
3478 * @kbps: target rate in Kbps
3479 * @sched: the scheduler index
3481 * Configure a Tx HW scheduler for the target rate.
3483 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3485 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3486 unsigned int clk = adap->params.vpd.cclk * 1000;
3487 unsigned int selected_cpt = 0, selected_bpt = 0;
3490 kbps *= 125; /* -> bytes */
3491 for (cpt = 1; cpt <= 255; cpt++) {
3493 bpt = (kbps + tps / 2) / tps;
3494 if (bpt > 0 && bpt <= 255) {
3496 delta = v >= kbps ? v - kbps : kbps - v;
3497 if (delta < mindelta) {
3502 } else if (selected_cpt)
3508 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3509 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3510 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3512 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3514 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3515 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3520 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3521 * @adap: the adapter
3522 * @sched: the scheduler index
3523 * @ipg: the interpacket delay in tenths of nanoseconds
3525 * Set the interpacket delay for a HW packet rate scheduler.
3527 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3529 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3531 /* convert ipg to nearest number of core clocks */
3532 ipg *= core_ticks_per_usec(adap);
3533 ipg = (ipg + 5000) / 10000;
3537 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3538 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3540 v = (v & 0xffff) | (ipg << 16);
3542 v = (v & 0xffff0000) | ipg;
3543 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3544 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3549 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3550 * @adap: the adapter
3551 * @sched: the scheduler index
3552 * @kbps: the byte rate in Kbps
3553 * @ipg: the interpacket delay in tenths of nanoseconds
3555 * Return the current configuration of a HW Tx scheduler.
3557 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3560 unsigned int v, addr, bpt, cpt;
3563 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3564 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3565 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3568 bpt = (v >> 8) & 0xff;
3571 *kbps = 0; /* scheduler disabled */
3573 v = (adap->params.vpd.cclk * 1000) / cpt;
3574 *kbps = (v * bpt) / 125;
3578 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3579 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3580 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3584 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3589 * tp_init - configure TP
3590 * @adap: the adapter
3591 * @p: TP configuration parameters
3593 * Initializes the TP HW module.
3595 static int tp_init(adapter_t *adap, const struct tp_params *p)
3600 t3_set_vlan_accel(adap, 3, 0);
3602 if (is_offload(adap)) {
3603 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3604 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3605 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3608 CH_ERR(adap, "TP initialization timed out\n");
3612 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3617 * t3_mps_set_active_ports - configure port failover
3618 * @adap: the adapter
3619 * @port_mask: bitmap of active ports
3621 * Sets the active ports according to the supplied bitmap.
3623 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3625 if (port_mask & ~((1 << adap->params.nports) - 1))
3627 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3628 port_mask << S_PORT0ACTIVE);
3633 * chan_init_hw - channel-dependent HW initialization
3634 * @adap: the adapter
3635 * @chan_map: bitmap of Tx channels being used
3637 * Perform the bits of HW initialization that are dependent on the Tx
3638 * channels being used.
3640 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3644 if (chan_map != 3) { /* one channel */
3645 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3646 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3647 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3648 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3649 F_TPTXPORT1EN | F_PORT1ACTIVE));
3650 t3_write_reg(adap, A_PM1_TX_CFG,
3651 chan_map == 1 ? 0xffffffff : 0);
3653 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3654 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3655 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3656 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3657 } else { /* two channels */
3658 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3659 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3660 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3661 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3662 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3663 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3665 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3666 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3667 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3668 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3669 for (i = 0; i < 16; i++)
3670 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3671 (i << 16) | 0x1010);
3672 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3673 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3677 static int calibrate_xgm(adapter_t *adapter)
3679 if (uses_xaui(adapter)) {
3682 for (i = 0; i < 5; ++i) {
3683 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3684 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3686 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3687 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3688 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3689 V_XAUIIMP(G_CALIMP(v) >> 2));
3693 CH_ERR(adapter, "MAC calibration failed\n");
3696 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3697 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3698 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3699 F_XGM_IMPSETUPDATE);
3704 static void calibrate_xgm_t3b(adapter_t *adapter)
3706 if (!uses_xaui(adapter)) {
3707 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3708 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3709 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3710 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3711 F_XGM_IMPSETUPDATE);
3712 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3714 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3715 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3719 struct mc7_timing_params {
3720 unsigned char ActToPreDly;
3721 unsigned char ActToRdWrDly;
3722 unsigned char PreCyc;
3723 unsigned char RefCyc[5];
3724 unsigned char BkCyc;
3725 unsigned char WrToRdDly;
3726 unsigned char RdToWrDly;
3730 * Write a value to a register and check that the write completed. These
3731 * writes normally complete in a cycle or two, so one read should suffice.
3732 * The very first read exists to flush the posted write to the device.
3734 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3736 t3_write_reg(adapter, addr, val);
3737 (void) t3_read_reg(adapter, addr); /* flush */
3738 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3740 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3744 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3746 static const unsigned int mc7_mode[] = {
3747 0x632, 0x642, 0x652, 0x432, 0x442
3749 static const struct mc7_timing_params mc7_timings[] = {
3750 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3751 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3752 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3753 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3754 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3758 unsigned int width, density, slow, attempts;
3759 adapter_t *adapter = mc7->adapter;
3760 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3765 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3766 slow = val & F_SLOW;
3767 width = G_WIDTH(val);
3768 density = G_DEN(val);
3770 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3771 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3775 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3776 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3778 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3779 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3780 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3786 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3787 V_ACTTOPREDLY(p->ActToPreDly) |
3788 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3789 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3790 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3792 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3793 val | F_CLKEN | F_TERM150);
3794 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3797 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3802 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3803 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3804 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3805 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3809 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3810 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3815 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3816 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3817 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3818 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3819 mc7_mode[mem_type]) ||
3820 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3821 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3824 /* clock value is in KHz */
3825 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3826 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3828 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3829 F_PERREFEN | V_PREREFDIV(mc7_clock));
3830 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3832 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3833 F_ECCGENEN | F_ECCCHKEN);
3834 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3835 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3836 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3837 (mc7->size << width) - 1);
3838 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3839 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3844 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3845 } while ((val & F_BUSY) && --attempts);
3847 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3851 /* Enable normal memory accesses. */
3852 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3859 static void config_pcie(adapter_t *adap)
3861 static const u16 ack_lat[4][6] = {
3862 { 237, 416, 559, 1071, 2095, 4143 },
3863 { 128, 217, 289, 545, 1057, 2081 },
3864 { 73, 118, 154, 282, 538, 1050 },
3865 { 67, 107, 86, 150, 278, 534 }
3867 static const u16 rpl_tmr[4][6] = {
3868 { 711, 1248, 1677, 3213, 6285, 12429 },
3869 { 384, 651, 867, 1635, 3171, 6243 },
3870 { 219, 354, 462, 846, 1614, 3150 },
3871 { 201, 321, 258, 450, 834, 1602 }
3875 unsigned int log2_width, pldsize;
3876 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3878 t3_os_pci_read_config_2(adap,
3879 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3881 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3883 t3_os_pci_read_config_2(adap,
3884 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3887 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3888 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3889 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3890 log2_width = fls(adap->params.pci.width) - 1;
3891 acklat = ack_lat[log2_width][pldsize];
3892 if (val & 1) /* check LOsEnable */
3893 acklat += fst_trn_tx * 4;
3894 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3896 if (adap->params.rev == 0)
3897 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3898 V_T3A_ACKLAT(M_T3A_ACKLAT),
3899 V_T3A_ACKLAT(acklat));
3901 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3904 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3905 V_REPLAYLMT(rpllmt));
3907 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3908 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3909 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3910 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3914 * t3_init_hw - initialize and configure T3 HW modules
3915 * @adapter: the adapter
3916 * @fw_params: initial parameters to pass to firmware (optional)
3918 * Initialize and configure T3 HW modules. This performs the
3919 * initialization steps that need to be done once after a card is reset.
3920 * MAC and PHY initialization is handled separarely whenever a port is
3923 * @fw_params are passed to FW and their value is platform dependent.
3924 * Only the top 8 bits are available for use, the rest must be 0.
3926 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3928 int err = -EIO, attempts, i;
3929 const struct vpd_params *vpd = &adapter->params.vpd;
3931 if (adapter->params.rev > 0)
3932 calibrate_xgm_t3b(adapter);
3933 else if (calibrate_xgm(adapter))
3936 if (adapter->params.nports > 2)
3937 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3940 partition_mem(adapter, &adapter->params.tp);
3942 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3943 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3944 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3945 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3946 adapter->params.mc5.nfilters,
3947 adapter->params.mc5.nroutes))
3950 for (i = 0; i < 32; i++)
3951 if (clear_sge_ctxt(adapter, i, F_CQ))
3955 if (tp_init(adapter, &adapter->params.tp))
3958 #ifdef CONFIG_CHELSIO_T3_CORE
3959 t3_tp_set_coalescing_size(adapter,
3960 min(adapter->params.sge.max_pkt_size,
3961 MAX_RX_COALESCING_LEN), 1);
3962 t3_tp_set_max_rxsize(adapter,
3963 min(adapter->params.sge.max_pkt_size, 16384U));
3964 ulp_config(adapter, &adapter->params.tp);
3966 if (is_pcie(adapter))
3967 config_pcie(adapter);
3969 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3970 F_DMASTOPEN | F_CLIDECEN);
3972 if (adapter->params.rev == T3_REV_C)
3973 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3974 F_CFG_CQE_SOP_MASK);
3976 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3977 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3978 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3979 chan_init_hw(adapter, adapter->params.chan_map);
3980 t3_sge_init(adapter, &adapter->params.sge);
3982 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3984 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3985 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3986 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3987 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3990 do { /* wait for uP to initialize */
3992 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3994 CH_ERR(adapter, "uP initialization timed out\n");
4004 * get_pci_mode - determine a card's PCI mode
4005 * @adapter: the adapter
4006 * @p: where to store the PCI settings
4008 * Determines a card's PCI mode and associated parameters, such as speed
4011 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4013 static unsigned short speed_map[] = { 33, 66, 100, 133 };
4014 u32 pci_mode, pcie_cap;
4016 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4020 p->variant = PCI_VARIANT_PCIE;
4021 p->pcie_cap_addr = pcie_cap;
4022 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4024 p->width = (val >> 4) & 0x3f;
4028 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4029 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4030 p->width = (pci_mode & F_64BIT) ? 64 : 32;
4031 pci_mode = G_PCIXINITPAT(pci_mode);
4033 p->variant = PCI_VARIANT_PCI;
4034 else if (pci_mode < 4)
4035 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4036 else if (pci_mode < 8)
4037 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4039 p->variant = PCI_VARIANT_PCIX_266_MODE2;
4043 * init_link_config - initialize a link's SW state
4044 * @lc: structure holding the link state
4045 * @caps: link capabilities
4047 * Initializes the SW state maintained for each link, including the link's
4048 * capabilities and default speed/duplex/flow-control/autonegotiation
4051 static void __devinit init_link_config(struct link_config *lc,
4054 lc->supported = caps;
4055 lc->requested_speed = lc->speed = SPEED_INVALID;
4056 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4057 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4058 if (lc->supported & SUPPORTED_Autoneg) {
4059 lc->advertising = lc->supported;
4060 lc->autoneg = AUTONEG_ENABLE;
4061 lc->requested_fc |= PAUSE_AUTONEG;
4063 lc->advertising = 0;
4064 lc->autoneg = AUTONEG_DISABLE;
4069 * mc7_calc_size - calculate MC7 memory size
4070 * @cfg: the MC7 configuration
4072 * Calculates the size of an MC7 memory in bytes from the value of its
4073 * configuration register.
4075 static unsigned int __devinit mc7_calc_size(u32 cfg)
4077 unsigned int width = G_WIDTH(cfg);
4078 unsigned int banks = !!(cfg & F_BKS) + 1;
4079 unsigned int org = !!(cfg & F_ORG) + 1;
4080 unsigned int density = G_DEN(cfg);
4081 unsigned int MBs = ((256 << density) * banks) / (org << width);
4086 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4087 unsigned int base_addr, const char *name)
4091 mc7->adapter = adapter;
4093 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4094 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4095 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4096 mc7->width = G_WIDTH(cfg);
4099 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4101 mac->adapter = adapter;
4102 mac->multiport = adapter->params.nports > 2;
4103 if (mac->multiport) {
4104 mac->ext_port = (unsigned char)index;
4110 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4112 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4113 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4114 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4115 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4121 * early_hw_init - HW initialization done at card detection time
4122 * @adapter: the adapter
4123 * @ai: contains information about the adapter type and properties
4125 * Perfoms the part of HW initialization that is done early on when the
4126 * driver first detecs the card. Most of the HW state is initialized
4127 * lazily later on when a port or an offload function are first used.
4129 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4131 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4133 u32 gpio_out = ai->gpio_out;
4135 mi1_init(adapter, ai);
4136 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
4137 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4138 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4139 gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4140 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4141 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4143 if (adapter->params.rev == 0 || !uses_xaui(adapter))
4146 /* Enable MAC clocks so we can access the registers */
4147 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4148 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4150 val |= F_CLKDIVRESET_;
4151 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4152 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4153 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4154 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4158 * t3_reset_adapter - reset the adapter
4159 * @adapter: the adapter
4161 * Reset the adapter.
4163 int t3_reset_adapter(adapter_t *adapter)
4165 int i, save_and_restore_pcie =
4166 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4169 if (save_and_restore_pcie)
4170 t3_os_pci_save_state(adapter);
4171 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4174 * Delay. Give Some time to device to reset fully.
4175 * XXX The delay time should be modified.
4177 for (i = 0; i < 10; i++) {
4179 t3_os_pci_read_config_2(adapter, 0x00, &devid);
4180 if (devid == 0x1425)
4184 if (devid != 0x1425)
4187 if (save_and_restore_pcie)
4188 t3_os_pci_restore_state(adapter);
4192 static int init_parity(adapter_t *adap)
4196 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4199 for (err = i = 0; !err && i < 16; i++)
4200 err = clear_sge_ctxt(adap, i, F_EGRESS);
4201 for (i = 0xfff0; !err && i <= 0xffff; i++)
4202 err = clear_sge_ctxt(adap, i, F_EGRESS);
4203 for (i = 0; !err && i < SGE_QSETS; i++)
4204 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4208 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4209 for (i = 0; i < 4; i++)
4210 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4211 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4212 F_IBQDBGWR | V_IBQDBGQID(i) |
4213 V_IBQDBGADDR(addr));
4214 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4215 F_IBQDBGBUSY, 0, 2, 1);
4223 * t3_prep_adapter - prepare SW and HW for operation
4224 * @adapter: the adapter
4225 * @ai: contains information about the adapter type and properties
4227 * Initialize adapter SW state for the various HW modules, set initial
4228 * values for some adapter tunables, take PHYs out of reset, and
4229 * initialize the MDIO interface.
4231 int __devinit t3_prep_adapter(adapter_t *adapter,
4232 const struct adapter_info *ai, int reset)
4235 unsigned int i, j = 0;
4237 get_pci_mode(adapter, &adapter->params.pci);
4239 adapter->params.info = ai;
4240 adapter->params.nports = ai->nports0 + ai->nports1;
4241 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
4242 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4245 * We used to only run the "adapter check task" once a second if
4246 * we had PHYs which didn't support interrupts (we would check
4247 * their link status once a second). Now we check other conditions
4248 * in that routine which would [potentially] impose a very high
4249 * interrupt load on the system. As such, we now always scan the
4250 * adapter state once a second ...
4252 adapter->params.linkpoll_period = 10;
4254 if (adapter->params.nports > 2)
4255 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4257 adapter->params.stats_update_period = is_10G(adapter) ?
4258 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4259 adapter->params.pci.vpd_cap_addr =
4260 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4262 ret = get_vpd_params(adapter, &adapter->params.vpd);
4266 if (reset && t3_reset_adapter(adapter))
4269 t3_sge_prep(adapter, &adapter->params.sge);
4271 if (adapter->params.vpd.mclk) {
4272 struct tp_params *p = &adapter->params.tp;
4274 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4275 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4276 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4278 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4279 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4280 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4281 p->cm_size = t3_mc7_size(&adapter->cm);
4282 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
4283 p->chan_tx_size = p->pmtx_size / p->nchan;
4284 p->rx_pg_size = 64 * 1024;
4285 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4286 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4287 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4288 p->ntimer_qs = p->cm_size >= (128 << 20) ||
4289 adapter->params.rev > 0 ? 12 : 6;
4290 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4292 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4295 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4296 t3_mc7_size(&adapter->pmtx) &&
4297 t3_mc7_size(&adapter->cm);
4299 if (is_offload(adapter)) {
4300 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4301 /* PR 6487. TOE and filtering are mutually exclusive */
4302 adapter->params.mc5.nfilters = 0;
4303 adapter->params.mc5.nroutes = 0;
4304 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4306 #ifdef CONFIG_CHELSIO_T3_CORE
4307 init_mtus(adapter->params.mtus);
4308 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4312 early_hw_init(adapter, ai);
4313 ret = init_parity(adapter);
4317 if (adapter->params.nports > 2 &&
4318 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4321 for_each_port(adapter, i) {
4323 const struct port_type_info *pti;
4324 struct port_info *p = adap2pinfo(adapter, i);
4327 unsigned port_type = adapter->params.vpd.port_type[j];
4329 if (port_type < ARRAY_SIZE(port_types)) {
4330 pti = &port_types[port_type];
4336 if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4339 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
4343 mac_prep(&p->mac, adapter, j);
4347 * The VPD EEPROM stores the base Ethernet address for the
4348 * card. A port's address is derived from the base by adding
4349 * the port's index to the base's low octet.
4351 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4352 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4354 t3_os_set_hw_addr(adapter, i, hw_addr);
4355 init_link_config(&p->link_config, p->phy.caps);
4356 p->phy.ops->power_down(&p->phy, 1);
4359 * If the PHY doesn't support interrupts for link status
4360 * changes, schedule a scan of the adapter links at least
4363 if (!(p->phy.caps & SUPPORTED_IRQ) &&
4364 adapter->params.linkpoll_period > 10)
4365 adapter->params.linkpoll_period = 10;
4372 * t3_reinit_adapter - prepare HW for operation again
4373 * @adapter: the adapter
4375 * Put HW in the same state as @t3_prep_adapter without any changes to
4376 * SW state. This is a cut down version of @t3_prep_adapter intended
4377 * to be used after events that wipe out HW state but preserve SW state,
4378 * e.g., EEH. The device must be reset before calling this.
/*
 * NOTE(review): this region is a fragmentary listing -- the leading decimal
 * numbers are line numbers from a pasted listing, and several original lines
 * (braces, local declarations of i/j/ret, error-return checks, loop control)
 * are missing.  Comments below describe only what the visible code shows.
 */
4380 int t3_reinit_adapter(adapter_t *adap)
/* Redo the early HW init and parity init normally done by t3_prep_adapter. */
4385 early_hw_init(adap, adap->params.info);
4386 ret = init_parity(adap);
/* Boards with more than two ports drive the extras through a VSC7323. */
4390 if (adap->params.nports > 2 &&
4391 (ret = t3_vsc7323_init(adap, adap->params.nports)))
/* Re-prepare each port's PHY; SW state (MAC addresses, link config) kept. */
4394 for_each_port(adap, i) {
4395 const struct port_type_info *pti;
4396 struct port_info *p = adap2pinfo(adap, i);
/* VPD-described port type indexes the static port_types dispatch table. */
4399 unsigned port_type = adap->params.vpd.port_type[j];
4401 if (port_type < ARRAY_SIZE(port_types)) {
4402 pti = &port_types[port_type];
/* Bounds check on the VPD port-type array index. */
4408 if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
/* Reuse the PHY address recorded in SW state; no MDIO ops override (NULL). */
4411 ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
/* Leave the PHY powered down until an interface is brought up. */
4414 p->phy.ops->power_down(&p->phy, 1);
4419 void t3_led_ready(adapter_t *adapter)
4421 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4425 void t3_port_failover(adapter_t *adapter, int port)
4429 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4430 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4434 void t3_failover_done(adapter_t *adapter, int port)
4436 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4437 F_PORT0ACTIVE | F_PORT1ACTIVE);
4440 void t3_failover_clear(adapter_t *adapter)
4442 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4443 F_PORT0ACTIVE | F_PORT1ACTIVE);
4446 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4450 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4451 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4452 F_HOSTBUSY, 0, 10, 10, &v))
4455 *val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4460 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4464 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4466 addr |= F_HOSTWRITE;
4467 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4469 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4470 F_HOSTBUSY, 0, 10, 5, &v))
/*
 * NOTE(review): this region is a fragmentary listing -- the leading decimal
 * numbers are line numbers from a pasted listing, and several original lines
 * (braces, locals i/cnt/ret, error checks, delays, the store of each LA_DATA
 * word into 'data', and the return) are missing.  Comments below describe
 * only what the visible code shows.
 */
4475 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4476 u32 *size, void *data)
/* Caller's buffer must hold LA_ENTRIES 32-bit words. */
4481 if (*size < LA_ENTRIES * 4)
/* Read the LA control word; bit 0 clear means the trace is stopped. */
4484 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4488 *stopped = !(v & 1);
/* Stop the LA (write 0 to LA_CTRL) before dumping its entries. */
4492 ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
/* For each entry: request it (index in bits 2+, bit 1 = read strobe)... */
4497 for (i = 0; i < LA_ENTRIES; i++) {
4498 v = (i << 2) | (1 << 1);
4499 ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4503 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* ...poll until the read strobe (bit 1) self-clears, bounded by cnt... */
4508 while ((v & (1 << 1)) && cnt) {
4511 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* ...then fetch the entry word from LA_DATA. */
4519 ret = t3_cim_hac_read(adapter, LA_DATA, &v);
/* Report the current write index (control bits 16+, offset by 4) and size. */
4526 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4530 *index = (v >> 16) + 4;
4531 *size = LA_ENTRIES * 4;
/* Restart the LA if it was running on entry (bit 0 = enable). */
4534 t3_cim_hac_write(adapter, LA_CTRL, 1);
4538 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4543 if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4546 for (i = 0; i < 4; i++) {
4547 ret = t3_cim_hac_read(adapter, (4 * i), &v);
4554 for (i = 0; i < IOQ_ENTRIES; i++) {
4555 u32 base_addr = 0x10 * (i + 1);
4557 for (j = 0; j < 4; j++) {
4558 ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4566 *size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);