1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include <cxgb_include.h>
37 #define msleep t3_os_sleep
40 * t3_wait_op_done_val - wait until an operation is completed
41 * @adapter: the adapter performing the operation
42 * @reg: the register to check for completion
43 * @mask: a single-bit field within @reg that indicates completion
44 * @polarity: the value of the field when the operation is completed
45 * @attempts: number of check iterations
46 * @delay: delay in usecs between iterations
47 * @valp: where to store the value of the register at completion time
49 * Wait until an operation is completed by checking a bit in a register
50 * up to @attempts times. If @valp is not NULL the value of the register
51 * at the time it indicated completion is stored there. Returns 0 if the
52 * operation completes and -EAGAIN otherwise.
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 int attempts, int delay, u32 *valp)
58 u32 val = t3_read_reg(adapter, reg);
60 if (!!(val & mask) == polarity) {
73 * t3_write_regs - write a bunch of registers
74 * @adapter: the adapter to program
75 * @p: an array of register address/register value pairs
76 * @n: the number of address/value pairs
77 * @offset: register address offset
79 * Takes an array of register address/register value pairs and writes each
80 * value to the corresponding register. Register addresses are adjusted
81 * by the supplied offset.
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
87 t3_write_reg(adapter, p->reg_addr + offset, p->val);
93 * t3_set_reg_field - set a register field to a value
94 * @adapter: the adapter to program
95 * @addr: the register address
96 * @mask: specifies the portion of the register to modify
97 * @val: the new value for the register field
99 * Sets a register field specified by the supplied mask to the
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 (void) t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static int shift[] = { 0, 0, 16, 24 };
147 static int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 adapter_t *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
166 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
167 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
168 while ((val & F_BUSY) && attempts--)
169 val = t3_read_reg(adap,
170 mc7->offset + A_MC7_BD_OP);
174 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
175 if (mc7->width == 0) {
176 val64 = t3_read_reg(adap,
177 mc7->offset + A_MC7_BD_DATA0);
178 val64 |= (u64)val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64)val << (step[mc7->width] * i);
192 * Low-level I2C read and write routines. These simply read and write a
193 * single byte with the option of indicating a "continue" if another operation
194 * is to be chained. Generally most code will use higher-level routines to
195 * read and write to I2C Slave Devices.
197 #define I2C_ATTEMPTS 100
200 * Read an 8-bit value from the I2C bus. If the "chained" parameter is
201 * non-zero then a STOP bit will not be written after the read command. On
202 * error (the read timed out, etc.), a negative errno will be returned (e.g.
203 * -EAGAIN, etc.). On success, the 8-bit value read from the I2C bus is
204 * stored into the buffer *valp and the value of the I2C ACK bit is returned
207 int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
212 t3_write_reg(adapter, A_I2C_OP,
213 F_I2C_READ | (chained ? F_I2C_CONT : 0));
214 ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
215 I2C_ATTEMPTS, 10, &opval);
217 ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
218 *valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
220 MDIO_UNLOCK(adapter);
225 * Write an 8-bit value to the I2C bus. If the "chained" parameter is
226 * non-zero, then a STOP bit will not be written after the write command. On
227 * error (the write timed out, etc.), a negative errno will be returned (e.g.
228 * -EAGAIN, etc.). On success, the value of the I2C ACK bit is returned as a
231 int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
236 t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
237 t3_write_reg(adapter, A_I2C_OP,
238 F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
239 ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
240 I2C_ATTEMPTS, 10, &opval);
242 ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
243 MDIO_UNLOCK(adapter);
250 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
252 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
253 u32 val = F_PREEN | V_CLKDIV(clkdiv);
255 t3_write_reg(adap, A_MI1_CFG, val);
258 #define MDIO_ATTEMPTS 20
261 * MI1 read/write operations for clause 22 PHYs.
/*
 * Clause-22 MDIO read through the MI1 interface.
 * NOTE(review): this listing is missing interior lines (original line
 * numbers jump 264->267 and 267->273), so the opening brace, local
 * declarations, the MDIO lock acquisition and the final return are not
 * visible here — confirm against the full source before editing.
 */
263 int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
264 int reg_addr, unsigned int *valp)
267 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* Select ST=1 framing (clause 22), program the address, start a read op. */
273 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
274 t3_write_reg(adapter, A_MI1_ADDR, addr);
275 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
/* Poll BUSY; the read result is then fetched from A_MI1_DATA. */
276 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
278 *valp = t3_read_reg(adapter, A_MI1_DATA);
279 MDIO_UNLOCK(adapter);
/*
 * Clause-22 MDIO write through the MI1 interface.
 * NOTE(review): interior lines are missing from this listing (original
 * line numbers jump 284->287 and 287->293): brace, locals, MDIO lock and
 * the return of `ret` are not visible — verify against the full source.
 */
283 int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
284 int reg_addr, unsigned int val)
287 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* ST=1 framing (clause 22); load address and data, then issue write op. */
293 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
294 t3_write_reg(adapter, A_MI1_ADDR, addr);
295 t3_write_reg(adapter, A_MI1_DATA, val);
296 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
/* Wait for the BUSY bit to clear before releasing the MDIO lock. */
297 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
298 MDIO_UNLOCK(adapter);
302 static struct mdio_ops mi1_mdio_ops = {
308 * MI1 read/write operations for clause 45 PHYs.
310 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
311 int reg_addr, unsigned int *valp)
314 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
317 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
318 t3_write_reg(adapter, A_MI1_ADDR, addr);
319 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
320 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
321 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
323 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
324 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
327 *valp = t3_read_reg(adapter, A_MI1_DATA);
329 MDIO_UNLOCK(adapter);
333 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
334 int reg_addr, unsigned int val)
337 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
340 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
341 t3_write_reg(adapter, A_MI1_ADDR, addr);
342 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
343 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
344 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
346 t3_write_reg(adapter, A_MI1_DATA, val);
347 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
348 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
351 MDIO_UNLOCK(adapter);
355 static struct mdio_ops mi1_mdio_ext_ops = {
361 * t3_mdio_change_bits - modify the value of a PHY register
362 * @phy: the PHY to operate on
363 * @mmd: the device address
364 * @reg: the register address
365 * @clear: what part of the register value to mask off
366 * @set: what part of the register value to set
368 * Changes the value of a PHY register by applying a mask to its current
369 * value and ORing the result with a new value.
371 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
377 ret = mdio_read(phy, mmd, reg, &val);
380 ret = mdio_write(phy, mmd, reg, val | set);
386 * t3_phy_reset - reset a PHY block
387 * @phy: the PHY to operate on
388 * @mmd: the device address of the PHY block to reset
389 * @wait: how long to wait for the reset to complete in 1ms increments
391 * Resets a PHY block and optionally waits for the reset to complete.
392 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
395 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
400 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
405 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
411 } while (ctl && --wait);
417 * t3_phy_advertise - set the PHY advertisement registers for autoneg
418 * @phy: the PHY to operate on
419 * @advert: bitmap of capabilities the PHY should advertise
421 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
422 * requested capabilities.
424 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
427 unsigned int val = 0;
429 err = mdio_read(phy, 0, MII_CTRL1000, &val);
433 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
434 if (advert & ADVERTISED_1000baseT_Half)
435 val |= ADVERTISE_1000HALF;
436 if (advert & ADVERTISED_1000baseT_Full)
437 val |= ADVERTISE_1000FULL;
439 err = mdio_write(phy, 0, MII_CTRL1000, val);
444 if (advert & ADVERTISED_10baseT_Half)
445 val |= ADVERTISE_10HALF;
446 if (advert & ADVERTISED_10baseT_Full)
447 val |= ADVERTISE_10FULL;
448 if (advert & ADVERTISED_100baseT_Half)
449 val |= ADVERTISE_100HALF;
450 if (advert & ADVERTISED_100baseT_Full)
451 val |= ADVERTISE_100FULL;
452 if (advert & ADVERTISED_Pause)
453 val |= ADVERTISE_PAUSE_CAP;
454 if (advert & ADVERTISED_Asym_Pause)
455 val |= ADVERTISE_PAUSE_ASYM;
456 return mdio_write(phy, 0, MII_ADVERTISE, val);
460 * t3_phy_advertise_fiber - set fiber PHY advertisement register
461 * @phy: the PHY to operate on
462 * @advert: bitmap of capabilities the PHY should advertise
464 * Sets a fiber PHY's advertisement register to advertise the
465 * requested capabilities.
/*
 * Build the 1000BASE-X advertisement word from the generic ADVERTISED_*
 * capability bitmap and write it to the PHY's MII_ADVERTISE register.
 * Only the braces are missing from this fragment of the listing; the
 * mapping logic is fully visible.
 */
467 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
469 unsigned int val = 0;
/* Map each requested capability bit to its 1000BASE-X register bit. */
471 if (advert & ADVERTISED_1000baseT_Half)
472 val |= ADVERTISE_1000XHALF;
473 if (advert & ADVERTISED_1000baseT_Full)
474 val |= ADVERTISE_1000XFULL;
475 if (advert & ADVERTISED_Pause)
476 val |= ADVERTISE_1000XPAUSE;
477 if (advert & ADVERTISED_Asym_Pause)
478 val |= ADVERTISE_1000XPSE_ASYM;
/* Device address 0: the base clause-22 register set. */
479 return mdio_write(phy, 0, MII_ADVERTISE, val);
483 * t3_set_phy_speed_duplex - force PHY speed and duplex
484 * @phy: the PHY to operate on
485 * @speed: requested PHY speed
486 * @duplex: requested PHY duplex
488 * Force a 10/100/1000 PHY's speed and duplex. This also disables
489 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
491 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
496 err = mdio_read(phy, 0, MII_BMCR, &ctl);
501 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
502 if (speed == SPEED_100)
503 ctl |= BMCR_SPEED100;
504 else if (speed == SPEED_1000)
505 ctl |= BMCR_SPEED1000;
508 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
509 if (duplex == DUPLEX_FULL)
510 ctl |= BMCR_FULLDPLX;
512 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
513 ctl |= BMCR_ANENABLE;
514 return mdio_write(phy, 0, MII_BMCR, ctl);
517 int t3_phy_lasi_intr_enable(struct cphy *phy)
519 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
522 int t3_phy_lasi_intr_disable(struct cphy *phy)
524 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
527 int t3_phy_lasi_intr_clear(struct cphy *phy)
531 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
534 int t3_phy_lasi_intr_handler(struct cphy *phy)
537 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
541 return (status & 1) ? cphy_cause_link_change : 0;
544 static struct adapter_info t3_adap_info[] = {
546 F_GPIO2_OEN | F_GPIO4_OEN |
547 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
548 &mi1_mdio_ops, "Chelsio PE9000" },
550 F_GPIO2_OEN | F_GPIO4_OEN |
551 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
552 &mi1_mdio_ops, "Chelsio T302" },
554 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
555 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
556 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
557 &mi1_mdio_ext_ops, "Chelsio T310" },
559 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
560 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
561 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
562 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
563 &mi1_mdio_ext_ops, "Chelsio T320" },
565 F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
566 F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
567 { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
568 &mi1_mdio_ops, "Chelsio T304" },
571 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
572 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
573 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
574 &mi1_mdio_ext_ops, "Chelsio T310" },
576 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
577 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
578 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
579 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
583 * Return the adapter_info structure with a given index. Out-of-range indices
586 const struct adapter_info *t3_get_adapter_info(unsigned int id)
588 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
591 struct port_type_info {
592 int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
593 const struct mdio_ops *ops);
596 static struct port_type_info port_types[] = {
598 { t3_ael1002_phy_prep },
599 { t3_vsc8211_phy_prep },
600 { t3_mv88e1xxx_phy_prep },
601 { t3_xaui_direct_phy_prep },
602 { t3_ael2005_phy_prep },
603 { t3_qt2045_phy_prep },
604 { t3_ael1006_phy_prep },
605 { t3_tn1010_phy_prep },
606 { t3_aq100x_phy_prep },
607 { t3_ael2020_phy_prep },
610 #define VPD_ENTRY(name, len) \
611 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
614 * Partial EEPROM Vital Product Data structure. Includes only the ID and
623 VPD_ENTRY(pn, 16); /* part number */
624 VPD_ENTRY(ec, ECNUM_LEN); /* EC level */
625 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
626 VPD_ENTRY(na, 12); /* MAC address base */
627 VPD_ENTRY(cclk, 6); /* core clock */
628 VPD_ENTRY(mclk, 6); /* mem clock */
629 VPD_ENTRY(uclk, 6); /* uP clk */
630 VPD_ENTRY(mdc, 6); /* MDIO clk */
631 VPD_ENTRY(mt, 2); /* mem timing */
632 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
633 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
634 VPD_ENTRY(port0, 2); /* PHY0 complex */
635 VPD_ENTRY(port1, 2); /* PHY1 complex */
636 VPD_ENTRY(port2, 2); /* PHY2 complex */
637 VPD_ENTRY(port3, 2); /* PHY3 complex */
638 VPD_ENTRY(rv, 1); /* csum */
639 u32 pad; /* for multiple-of-4 sizing and alignment */
642 #define EEPROM_MAX_POLL 40
643 #define EEPROM_STAT_ADDR 0x4000
644 #define VPD_BASE 0xc00
647 * t3_seeprom_read - read a VPD EEPROM location
648 * @adapter: adapter to read
649 * @addr: EEPROM address
650 * @data: where to store the read data
652 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
653 * VPD ROM capability. A zero is written to the flag bit when the
654 * address is written to the control register. The hardware device will
655 * set the flag to 1 when 4 bytes have been read into the data register.
/*
 * Read one aligned 32-bit word from the VPD EEPROM via the PCI VPD
 * capability: write the address with the flag bit clear, poll until the
 * hardware sets PCI_VPD_ADDR_F, then read the data register.
 * NOTE(review): this listing drops interior lines (braces, the do{ of
 * the poll loop, the early-error returns) — confirm against full source.
 */
657 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
660 int attempts = EEPROM_MAX_POLL;
661 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Reject out-of-range or unaligned addresses (EEPROM_STAT_ADDR exempt). */
663 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
666 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
/* Poll the VPD address register until the flag bit indicates completion. */
669 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
670 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
672 if (!(val & PCI_VPD_ADDR_F)) {
673 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
/* Data arrives little-endian from config space; convert to host order. */
676 t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
677 *data = le32_to_cpu(*data);
682 * t3_seeprom_write - write a VPD EEPROM location
683 * @adapter: adapter to write
684 * @addr: EEPROM address
685 * @data: value to write
687 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
688 * VPD ROM capability.
/*
 * Write one aligned 32-bit word to the VPD EEPROM via the PCI VPD
 * capability: load the data register, write the address with
 * PCI_VPD_ADDR_F set, then poll until the hardware clears the flag.
 * NOTE(review): interior lines (braces, do{, cpu_to_le32 conversion of
 * the data argument, error returns) are missing from this listing.
 */
690 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
693 int attempts = EEPROM_MAX_POLL;
694 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Same validity check as t3_seeprom_read: in range and 4-byte aligned. */
696 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
699 t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
701 t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
702 (u16)addr | PCI_VPD_ADDR_F);
/* For writes the flag is set by us and cleared by hardware when done. */
705 t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
706 } while ((val & PCI_VPD_ADDR_F) && --attempts);
708 if (val & PCI_VPD_ADDR_F) {
709 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
716 * t3_seeprom_wp - enable/disable EEPROM write protection
717 * @adapter: the adapter
718 * @enable: 1 to enable write protection, 0 to disable it
720 * Enables or disables write protection on the serial EEPROM.
722 int t3_seeprom_wp(adapter_t *adapter, int enable)
724 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
728 * Convert a character holding a hex digit to a number.
730 static unsigned int hex2int(unsigned char c)
732 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
736 * get_desc_len - get the length of a vpd descriptor.
737 * @adapter: the adapter
738 * @offset: first byte offset of the vpd descriptor
740 * Retrieves the length of the small/large resource
741 * data type starting at offset.
743 static int get_desc_len(adapter_t *adapter, u32 offset)
745 u32 read_offset, tmp, shift, len = 0;
749 read_offset = offset & 0xfffffffc;
750 shift = offset & 0x03;
752 ret = t3_seeprom_read(adapter, read_offset, &tmp);
756 *((u32 *)buf) = cpu_to_le32(tmp);
760 ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
764 *((u32 *)(&buf[4])) = cpu_to_le32(tmp);
765 len = (buf[shift + 1] & 0xff) +
766 ((buf[shift+2] << 8) & 0xff00) + 3;
768 len = (tag & 0x07) + 1;
774 * is_end_tag - Check if a vpd tag is the end tag.
775 * @adapter: the adapter
776 * @offset: first byte offset of the tag
778 * Checks if the tag located at offset is the end tag.
780 static int is_end_tag(adapter_t * adapter, u32 offset)
782 u32 read_offset, shift, ret, tmp;
785 read_offset = offset & 0xfffffffc;
786 shift = offset & 0x03;
788 ret = t3_seeprom_read(adapter, read_offset, &tmp);
791 *((u32 *)buf) = cpu_to_le32(tmp);
793 if (buf[shift] == 0x78)
800 * t3_get_vpd_len - computes the length of a vpd structure
801 * @adapter: the adapter
802 * @vpd: contains the offset of first byte of vpd
804 * Computes the length of the vpd structure starting at vpd->offset.
807 int t3_get_vpd_len(adapter_t * adapter, struct generic_vpd *vpd)
812 offset = vpd->offset;
814 while (offset < (vpd->offset + MAX_VPD_BYTES)) {
815 ret = is_end_tag(adapter, offset);
821 inc = get_desc_len(adapter, offset);
831 * t3_read_vpd - reads the stream of bytes containing a vpd structure
832 * @adapter: the adapter
833 * @vpd: contains a buffer that would hold the stream of bytes
835 * Reads the vpd structure starting at vpd->offset into vpd->data,
836 * the length of the byte stream to read is vpd->len.
839 int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
843 for (i = 0; i < vpd->len; i += 4) {
844 ret = t3_seeprom_read(adapter, vpd->offset + i,
845 (u32 *) &(vpd->data[i]));
855 * get_vpd_params - read VPD parameters from VPD EEPROM
856 * @adapter: adapter to read
857 * @p: where to store the parameters
859 * Reads card parameters stored in VPD EEPROM.
861 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
867 * Card information is normally at VPD_BASE but some early cards had
870 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
873 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
875 for (i = 0; i < sizeof(vpd); i += 4) {
876 ret = t3_seeprom_read(adapter, addr + i,
877 (u32 *)((u8 *)&vpd + i));
882 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
883 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
884 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
885 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
886 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
887 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
888 memcpy(p->ec, vpd.ec_data, ECNUM_LEN);
890 /* Old eeproms didn't have port information */
891 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
892 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
893 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
895 p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
896 p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
897 p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
898 p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
899 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
900 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
903 for (i = 0; i < 6; i++)
904 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
905 hex2int(vpd.na_data[2 * i + 1]);
909 /* BIOS boot header */
910 typedef struct boot_header_s {
911 u8 signature[2]; /* signature */
912 u8 length; /* image length (include header) */
913 u8 offset[4]; /* initialization vector */
914 u8 reserved[19]; /* reserved */
915 u8 exheader[2]; /* offset to expansion header */
918 /* serial flash and firmware constants */
920 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
921 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
922 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
924 /* flash command opcodes */
925 SF_PROG_PAGE = 2, /* program page */
926 SF_WR_DISABLE = 4, /* disable writes */
927 SF_RD_STATUS = 5, /* read status register */
928 SF_WR_ENABLE = 6, /* enable writes */
929 SF_RD_DATA_FAST = 0xb, /* read flash */
930 SF_ERASE_SECTOR = 0xd8, /* erase sector */
932 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
933 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
934 FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
935 FW_MIN_SIZE = 8, /* at least version and csum */
936 FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
937 FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,
939 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
940 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
941 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
942 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
943 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment */
947 * sf1_read - read data from the serial flash
948 * @adapter: the adapter
949 * @byte_cnt: number of bytes to read
950 * @cont: whether another operation will be chained
951 * @valp: where to store the read data
953 * Reads up to 4 bytes of data from the serial flash. The location of
954 * the read needs to be specified prior to calling this by issuing the
955 * appropriate commands to the serial flash.
957 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
962 if (!byte_cnt || byte_cnt > 4)
964 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
966 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
967 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
969 *valp = t3_read_reg(adapter, A_SF_DATA);
974 * sf1_write - write data to the serial flash
975 * @adapter: the adapter
976 * @byte_cnt: number of bytes to write
977 * @cont: whether another operation will be chained
978 * @val: value to write
980 * Writes up to 4 bytes of data to the serial flash. The location of
981 * the write needs to be specified prior to calling this by issuing the
982 * appropriate commands to the serial flash.
/*
 * Push up to 4 bytes into the serial-flash data register and kick off a
 * write operation (V_OP(1)), optionally chaining to a following op.
 * NOTE(review): this fragment is truncated — the tail of the parameter
 * list (the value argument, original line 985) and the bodies of the two
 * guard `if`s (their error returns) are missing from the listing.
 */
984 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
/* Guard: byte count must be 1..4 and the SF engine must be idle. */
987 if (!byte_cnt || byte_cnt > 4)
989 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
991 t3_write_reg(adapter, A_SF_DATA, val);
992 t3_write_reg(adapter, A_SF_OP,
993 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
/* Completion is signalled by F_BUSY clearing in A_SF_OP. */
994 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
998 * flash_wait_op - wait for a flash operation to complete
999 * @adapter: the adapter
1000 * @attempts: max number of polls of the status register
1001 * @delay: delay between polls in ms
1003 * Wait for a flash operation to complete by polling the status register.
1005 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
1011 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
1012 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
1016 if (--attempts == 0)
1024 * t3_read_flash - read words from serial flash
1025 * @adapter: the adapter
1026 * @addr: the start address for the read
1027 * @nwords: how many 32-bit words to read
1028 * @data: where to store the read data
1029 * @byte_oriented: whether to store data as bytes or as words
1031 * Read the specified number of 32-bit words from the serial flash.
1032 * If @byte_oriented is set the read data is stored as a byte array
1033 * (i.e., big-endian), otherwise as 32-bit words in the platform's
1034 * natural endianness.
1036 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
1037 u32 *data, int byte_oriented)
1041 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
1044 addr = swab32(addr) | SF_RD_DATA_FAST;
1046 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
1047 (ret = sf1_read(adapter, 1, 1, data)) != 0)
1050 for ( ; nwords; nwords--, data++) {
1051 ret = sf1_read(adapter, 4, nwords > 1, data);
1055 *data = htonl(*data);
1061 * t3_write_flash - write up to a page of data to the serial flash
1062 * @adapter: the adapter
1063 * @addr: the start address to write
1064 * @n: length of data to write
1065 * @data: the data to write
1066 * @byte_oriented: whether to store data as bytes or as words
1068 * Writes up to a page of data (256 bytes) to the serial flash starting
1069 * at the given address.
1070 * If @byte_oriented is set the write data is stored as a 32-bit
1071 * big-endian array, otherwise in the processor's native endianness.
1074 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
1075 unsigned int n, const u8 *data,
1080 unsigned int c, left, val, offset = addr & 0xff;
1082 if (addr + n > SF_SIZE || offset + n > 256)
1085 val = swab32(addr) | SF_PROG_PAGE;
1087 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1088 (ret = sf1_write(adapter, 4, 1, val)) != 0)
1091 for (left = n; left; left -= c) {
1093 val = *(const u32*)data;
1098 ret = sf1_write(adapter, c, c != left, val);
1102 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
1105 /* Read the page to verify the write succeeded */
1106 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
1111 if (memcmp(data - n, (u8 *)buf + offset, n))
1117 * t3_get_tp_version - read the tp sram version
1118 * @adapter: the adapter
1119 * @vers: where to place the version
1121 * Reads the protocol sram version from sram.
1123 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
1127 /* Get version loaded in SRAM */
1128 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
1129 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
1134 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
1140 * t3_check_tpsram_version - read the tp sram version
1141 * @adapter: the adapter
1144 int t3_check_tpsram_version(adapter_t *adapter)
1148 unsigned int major, minor;
1150 if (adapter->params.rev == T3_REV_A)
1154 ret = t3_get_tp_version(adapter, &vers);
1158 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
1160 major = G_TP_VERSION_MAJOR(vers);
1161 minor = G_TP_VERSION_MINOR(vers);
1163 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1166 CH_ERR(adapter, "found wrong TP version (%u.%u), "
1167 "driver compiled for version %d.%d\n", major, minor,
1168 TP_VERSION_MAJOR, TP_VERSION_MINOR);
1174 * t3_check_tpsram - check if provided protocol SRAM
1175 * is compatible with this driver
1176 * @adapter: the adapter
1177 * @tp_sram: the firmware image to write
1180 * Checks if an adapter's tp sram is compatible with the driver.
1181 * Returns 0 if the versions are compatible, a negative error otherwise.
1183 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1187 const u32 *p = (const u32 *)tp_sram;
1189 /* Verify checksum */
1190 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1191 csum += ntohl(p[i]);
1192 if (csum != 0xffffffff) {
1193 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1201 enum fw_version_type {
1207 * t3_get_fw_version - read the firmware version
1208 * @adapter: the adapter
1209 * @vers: where to place the version
1211 * Reads the FW version from flash. Note that we had to move the version
1212 * due to FW size. If we don't find a valid FW version in the new location
1213 * we fall back and read the old location.
1215 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1217 int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1218 if (!ret && *vers != 0xffffffff)
1221 return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
/* NOTE(review): fragment — interior lines elided; code kept byte-identical.
 * Visible logic: exact type/major/minor match is OK; otherwise warn,
 * distinguishing an older minor from a newer FW. */
1225 * t3_check_fw_version - check if the FW is compatible with this driver
1226 * @adapter: the adapter
1228 * Checks if an adapter's FW is compatible with the driver. Returns 0
1229 * if the versions are compatible, a negative error otherwise.
1231 int t3_check_fw_version(adapter_t *adapter)
1235 unsigned int type, major, minor;
1237 ret = t3_get_fw_version(adapter, &vers);
1241 type = G_FW_VERSION_TYPE(vers);
1242 major = G_FW_VERSION_MAJOR(vers);
1243 minor = G_FW_VERSION_MINOR(vers);
1245 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1246 minor == FW_VERSION_MINOR)
1249 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1250 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1251 "driver compiled for version %u.%u\n", major, minor,
1252 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1254 CH_WARN(adapter, "found newer FW version(%u.%u), "
1255 "driver compiled for version %u.%u\n", major, minor,
1256 FW_VERSION_MAJOR, FW_VERSION_MINOR);
/* NOTE(review): fragment — loop increment and error/return paths elided;
 * code kept byte-identical. Each sector erase is: write-enable, issue
 * SF_ERASE_SECTOR with the sector number in bits 8+, then poll completion. */
1263 * t3_flash_erase_sectors - erase a range of flash sectors
1264 * @adapter: the adapter
1265 * @start: the first sector to erase
1266 * @end: the last sector to erase
1268 * Erases the sectors in the given range.
1270 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1272 while (start <= end) {
1275 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1276 (ret = sf1_write(adapter, 4, 0,
1277 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1278 (ret = flash_wait_op(adapter, 5, 500)) != 0)
/* NOTE(review): fragment — error returns and loop bookkeeping elided; code
 * kept byte-identical. Flow visible here: validate size/alignment, pick the
 * version slot by FW major (<8 uses the pre-8 address), verify the
 * one's-complement checksum, erase the boot sector, then write the image in
 * 256-byte chunks followed by the 4-byte version word. */
1286 * t3_load_fw - download firmware
1287 * @adapter: the adapter
1288 * @fw_data: the firmware image to write
1291 * Write the supplied firmware image to the card's serial flash.
1292 * The FW image has the following sections: @size - 8 bytes of code and
1293 * data, followed by 4 bytes of FW version, followed by the 32-bit
1294 * 1's complement checksum of the whole image.
1296 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1298 u32 version, csum, fw_version_addr;
1300 const u32 *p = (const u32 *)fw_data;
1301 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1303 if ((size & 3) || size < FW_MIN_SIZE)
1305 if (size - 8 > FW_MAX_SIZE)
1308 version = ntohl(*(const u32 *)(fw_data + size - 8));
1309 if (G_FW_VERSION_MAJOR(version) < 8) {
1311 fw_version_addr = FW_VERS_ADDR_PRE8;
1313 if (size - 8 > FW_MAX_SIZE_PRE8)
1316 fw_version_addr = FW_VERS_ADDR;
1318 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1319 csum += ntohl(p[i]);
1320 if (csum != 0xffffffff) {
1321 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1326 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1330 size -= 8; /* trim off version and checksum */
1331 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1332 unsigned int chunk_size = min(size, 256U);
1334 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1339 fw_data += chunk_size;
1343 ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
1346 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
/* NOTE(review): fragment — error returns and loop bookkeeping elided; code
 * kept byte-identical. Sanity checks visible: size bounds, the 16-bit boot
 * signature in the header, and header->length * BOOT_SIZE_INC == size. */
1351 * t3_load_boot - download boot flash
1352 * @adapter: the adapter
1353 * @boot_data: the boot image to write
1356 * Write the supplied boot image to the card's serial flash.
1357 * The boot image has the following sections: a 28-byte header and the
1360 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1362 boot_header_t *header = (boot_header_t *)boot_data;
1365 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1366 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1369 * Perform some primitive sanity testing to avoid accidentally
1370 * writing garbage over the boot sectors. We ought to check for
1371 * more but it's not worth it for now ...
1373 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1374 CH_ERR(adapter, "boot image too small/large\n");
1377 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1378 CH_ERR(adapter, "boot image missing signature\n");
1381 if (header->length * BOOT_SIZE_INC != size) {
1382 CH_ERR(adapter, "boot image header length != image length\n");
1386 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1390 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1391 unsigned int chunk_size = min(size, 256U);
1393 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1398 boot_data += chunk_size;
1404 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
/* NOTE(review): fragment — declarations and return elided; code kept
 * byte-identical. Per word: write the CIM-relative address to the host
 * access control register, wait for F_HOSTBUSY to clear, read the data
 * register. Presumably returns -EBUSY when the engine is already busy —
 * confirm against the full source. */
1408 #define CIM_CTL_BASE 0x2000
1411 * t3_cim_ctl_blk_read - read a block from CIM control region
1412 * @adap: the adapter
1413 * @addr: the start address within the CIM control region
1414 * @n: number of words to read
1415 * @valp: where to store the result
1417 * Reads a block of 4-byte words from the CIM control region.
1419 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1424 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1427 for ( ; !ret && n--; addr += 4) {
1428 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1429 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1432 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
/* NOTE(review): fragment — braces and the trailing drain delay elided; code
 * kept byte-identical. Saves the current RX_CFG and hash registers into the
 * out-parameters so t3_open_rx_traffic() can restore them. */
1437 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1438 u32 *rx_hash_high, u32 *rx_hash_low)
1440 /* stop Rx unicast traffic */
1441 t3_mac_disable_exact_filters(mac);
1443 /* stop broadcast, multicast, promiscuous mode traffic */
1444 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1445 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1446 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1449 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1450 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1452 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1453 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1455 /* Leave time to drain max RX fifo */
/* NOTE(review): fragment — braces and one t3_set_reg_field argument elided;
 * code kept byte-identical. Inverse of t3_gate_rx_traffic(): re-enables the
 * exact-match filters and restores the saved RX_CFG and hash registers. */
1459 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1460 u32 rx_hash_high, u32 rx_hash_low)
1462 t3_mac_enable_exact_filters(mac);
1463 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1464 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1466 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1467 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
/* NOTE(review): fragment — interior lines elided; code kept byte-identical.
 * Visible technique: gate all Rx traffic, clear/re-arm the XGMAC interrupt,
 * re-enable Rx, then report 1 iff F_LINKFAULTCHANGE is latched in
 * XGM_INT_STATUS. */
1470 static int t3_detect_link_fault(adapter_t *adapter, int port_id)
1472 struct port_info *pi = adap2pinfo(adapter, port_id);
1473 struct cmac *mac = &pi->mac;
1474 uint32_t rx_cfg, rx_hash_high, rx_hash_low;
1478 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1479 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1481 /* clear status and make sure intr is enabled */
1482 (void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1483 t3_xgm_intr_enable(adapter, port_id);
1486 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1487 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1489 link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1490 return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
/* NOTE(review): fragment — braces and any >2-port path elided; code kept
 * byte-identical. For <=2-port adapters: ack XGM_INT_CAUSE and re-enable the
 * F_XGM_INT summary bit with XGMAC interrupts momentarily disabled. */
1493 static void t3_clear_faults(adapter_t *adapter, int port_id)
1495 struct port_info *pi = adap2pinfo(adapter, port_id);
1496 struct cmac *mac = &pi->mac;
1498 if (adapter->params.nports <= 2) {
1499 t3_xgm_intr_disable(adapter, pi->port_id);
1500 t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1501 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
1502 t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
1503 F_XGM_INT, F_XGM_INT);
1504 t3_xgm_intr_enable(adapter, pi->port_id);
/* NOTE(review): fragment — many interior lines (declarations, braces,
 * else-branches) are elided; code kept byte-identical. Visible flow: read
 * PHY link state, resolve flow control (autoneg result vs. requested), run
 * the link-fault detector on suspected faults for <=2-port adapters, then
 * on a real state change update the link_config and either bring the MAC
 * up (Tx/Rx enable, clear faults) or tear it down (disable XAUI lanes,
 * power down 10G PHY, restart link). Finally notifies the OS layer. */
1509 * t3_link_changed - handle interface link changes
1510 * @adapter: the adapter
1511 * @port_id: the port index that changed link state
1513 * Called when a port's link settings change to propagate the new values
1514 * to the associated PHY and MAC. After performing the common tasks it
1515 * invokes an OS-specific handler.
1517 void t3_link_changed(adapter_t *adapter, int port_id)
1519 int link_ok, speed, duplex, fc, link_fault;
1520 struct port_info *pi = adap2pinfo(adapter, port_id);
1521 struct cphy *phy = &pi->phy;
1522 struct cmac *mac = &pi->mac;
1523 struct link_config *lc = &pi->link_config;
1525 link_ok = lc->link_ok;
1527 duplex = lc->duplex;
1531 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1533 if (lc->requested_fc & PAUSE_AUTONEG)
1534 fc &= lc->requested_fc;
1536 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1538 /* Update mac speed before checking for link fault. */
1539 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
1540 (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
1541 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1544 * Check for link faults if any of these is true:
1545 * a) A link fault is suspected, and PHY says link ok
1546 * b) PHY link transitioned from down -> up
1548 if (adapter->params.nports <= 2 &&
1549 ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
1551 link_fault = t3_detect_link_fault(adapter, port_id);
1553 if (pi->link_fault != LF_YES) {
1554 mac->stats.link_faults++;
1555 pi->link_fault = LF_YES;
1558 /* Don't report link up */
1561 /* clear faults here if this was a false alarm. */
1562 if (pi->link_fault == LF_MAYBE &&
1563 link_ok && lc->link_ok)
1564 t3_clear_faults(adapter, port_id);
1566 pi->link_fault = LF_NO;
1570 if (link_ok == lc->link_ok && speed == lc->speed &&
1571 duplex == lc->duplex && fc == lc->fc)
1572 return; /* nothing changed */
1574 lc->link_ok = (unsigned char)link_ok;
1575 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1576 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1581 /* down -> up, or up -> up with changed settings */
1583 if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1584 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1585 F_TXACTENABLE | F_RXEN);
1588 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
1590 t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1591 t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
1593 t3_clear_faults(adapter, port_id);
1599 if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1600 t3_write_reg(adapter,
1601 A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1604 t3_xgm_intr_disable(adapter, pi->port_id);
1605 if (adapter->params.nports <= 2) {
1606 t3_set_reg_field(adapter,
1607 A_XGM_INT_ENABLE + mac->offset,
1612 if (is_10G(adapter))
1613 pi->phy.ops->power_down(&pi->phy, 1);
1614 t3_mac_disable(mac, MAC_DIRECTION_RX);
1615 t3_link_start(phy, mac, lc);
1619 * Make sure Tx FIFO continues to drain, even as rxen is left
1620 * high to help detect and indicate remote faults.
1622 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0,
1624 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1625 t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN);
1626 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1629 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
/* NOTE(review): fragment — braces and some pause-advertisement conditions
 * elided; code kept byte-identical. With autoneg disabled the MAC and PHY
 * are forced to the requested speed/duplex/fc; otherwise autoneg is enabled
 * and the MAC fc is pre-set (-1 speed/duplex = leave unchanged until
 * negotiation completes, per the header comment). */
1635 * t3_link_start - apply link configuration to MAC/PHY
1636 * @phy: the PHY to setup
1637 * @mac: the MAC to setup
1638 * @lc: the requested link configuration
1640 * Set up a port's MAC and PHY according to a desired link configuration.
1641 * - If the PHY can auto-negotiate first decide what to advertise, then
1642 * enable/disable auto-negotiation as desired, and reset.
1643 * - If the PHY does not auto-negotiate just reset it.
1644 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1645 * otherwise do it later based on the outcome of auto-negotiation.
1647 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1649 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1652 if (lc->supported & SUPPORTED_Autoneg) {
1653 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1655 lc->advertising |= ADVERTISED_Asym_Pause;
1657 lc->advertising |= ADVERTISED_Pause;
1660 phy->ops->advertise(phy, lc->advertising);
1662 if (lc->autoneg == AUTONEG_DISABLE) {
1663 lc->speed = lc->requested_speed;
1664 lc->duplex = lc->requested_duplex;
1665 lc->fc = (unsigned char)fc;
1666 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1668 /* Also disables autoneg */
1669 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1670 /* PR 5666. Power phy up when doing an ifup */
1671 if (!is_10G(phy->adapter))
1672 phy->ops->power_down(phy, 0);
1674 phy->ops->autoneg_enable(phy);
1676 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1677 lc->fc = (unsigned char)fc;
1678 phy->ops->reset(phy, 0);
/* NOTE(review): fragment — braces elided; code kept byte-identical. The
 * per-port VLAN-extraction enables live as a bitfield at
 * S_VLANEXTRACTIONENABLE in TP_OUT_CONFIG; @ports selects which bits to
 * set/clear. */
1684 * t3_set_vlan_accel - control HW VLAN extraction
1685 * @adapter: the adapter
1686 * @ports: bitmap of adapter ports to operate on
1687 * @on: enable (1) or disable (0) HW VLAN extraction
1689 * Enables or disables HW extraction of VLAN tags for the given port.
1691 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1693 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1694 ports << S_VLANEXTRACTIONENABLE,
1695 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* NOTE(review): fragment — the struct intr_info header, fatal-count
 * accumulation and return are elided; code kept byte-identical. A mask==0
 * entry terminates the table; matched bits are logged (ALERT for fatal,
 * WARN otherwise — the fatal branch brace is partly elided), optionally
 * counted into @stats, and all processed bits are written back to clear. */
1699 unsigned int mask; /* bits to check in interrupt status */
1700 const char *msg; /* message to print or NULL */
1701 short stat_idx; /* stat counter to increment or -1 */
1702 unsigned short fatal; /* whether the condition reported is fatal */
1706 * t3_handle_intr_status - table driven interrupt handler
1707 * @adapter: the adapter that generated the interrupt
1708 * @reg: the interrupt status register to process
1709 * @mask: a mask to apply to the interrupt status
1710 * @acts: table of interrupt actions
1711 * @stats: statistics counters tracking interrupt occurences
1713 * A table driven interrupt handler that applies a set of masks to an
1714 * interrupt status word and performs the corresponding actions if the
1715 * interrupts described by the mask have occured. The actions include
1716 * optionally printing a warning or alert message, and optionally
1717 * incrementing a stat counter. The table is terminated by an entry
1718 * specifying mask 0. Returns the number of fatal interrupt conditions.
1720 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1722 const struct intr_info *acts,
1723 unsigned long *stats)
1726 unsigned int status = t3_read_reg(adapter, reg) & mask;
1728 for ( ; acts->mask; ++acts) {
1729 if (!(status & acts->mask)) continue;
1732 CH_ALERT(adapter, "%s (0x%x)\n",
1733 acts->msg, status & acts->mask);
1734 } else if (acts->msg)
1735 CH_WARN(adapter, "%s (0x%x)\n",
1736 acts->msg, status & acts->mask);
1737 if (acts->stat_idx >= 0)
1738 stats[acts->stat_idx]++;
1740 if (status) /* clear processed interrupts */
1741 t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt enable masks used by t3_intr_enable() and the
 * per-module handlers below. NOTE(review): fragment — some continuation
 * lines of these macros are elided in this extract; kept byte-identical.
 * The commented-out MSIXPARERR terms in the PCIX/PCIE masks are deliberate
 * exclusions, not dead code.
 */
1745 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1746 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1747 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1748 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1749 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1750 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1752 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1753 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1755 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1756 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1757 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1759 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1760 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1761 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1762 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1763 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1764 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1765 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1766 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1767 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1768 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1769 F_TXPARERR | V_BISTERR(M_BISTERR))
1770 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1771 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1772 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1773 #define ULPTX_INTR_MASK 0xfc
1774 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1775 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1776 F_ZERO_SWITCH_ERROR)
1777 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1778 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1779 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1780 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1781 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1782 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1783 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1784 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1785 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1786 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1787 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1788 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1789 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1790 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1791 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1792 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1793 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1794 V_MCAPARERRENB(M_MCAPARERRENB))
1795 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1796 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1797 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1798 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1799 F_MPS0 | F_CPL_SWITCH)
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. Table-driven via t3_handle_intr_status(); a nonzero fatal
 * count triggers t3_fatal_err(). */
1801 * Interrupt handler for the PCIX1 module.
1803 static void pci_intr_handler(adapter_t *adapter)
1805 static struct intr_info pcix1_intr_info[] = {
1806 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1807 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1808 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1809 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1810 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1811 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1812 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1813 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1814 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1816 { F_DETCORECCERR, "PCI correctable ECC error",
1817 STAT_PCI_CORR_ECC, 0 },
1818 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1819 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1820 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1822 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1824 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1826 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1831 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1832 pcix1_intr_info, adapter->irq_stats))
1833 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. F_PEXERR additionally dumps the PEX error code register
 * before the table-driven pass. */
1837 * Interrupt handler for the PCIE module.
1839 static void pcie_intr_handler(adapter_t *adapter)
1841 static struct intr_info pcie_intr_info[] = {
1842 { F_PEXERR, "PCI PEX error", -1, 1 },
1844 "PCI unexpected split completion DMA read error", -1, 1 },
1846 "PCI unexpected split completion DMA command error", -1, 1 },
1847 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1848 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1849 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1850 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1851 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1852 "PCI MSI-X table/PBA parity error", -1, 1 },
1853 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1854 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1855 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1856 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1857 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1861 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1862 CH_ALERT(adapter, "PEX error code 0x%x\n",
1863 t3_read_reg(adapter, A_PCIE_PEX_ERR))
1865 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1866 pcie_intr_info, adapter->irq_stats))
1867 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminators and braces elided; code kept
 * byte-identical. Pre-T3C parts use raw bit masks; T3C and later use the
 * named FLM empty-list flags. */
1871 * TP interrupt handler.
1873 static void tp_intr_handler(adapter_t *adapter)
1875 static struct intr_info tp_intr_info[] = {
1876 { 0xffffff, "TP parity error", -1, 1 },
1877 { 0x1000000, "TP out of Rx pages", -1, 1 },
1878 { 0x2000000, "TP out of Tx pages", -1, 1 },
1881 static struct intr_info tp_intr_info_t3c[] = {
1882 { 0x1fffffff, "TP parity error", -1, 1 },
1883 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1884 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1888 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1889 adapter->params.rev < T3_REV_C ?
1890 tp_intr_info : tp_intr_info_t3c, NULL))
1891 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. Every visible CIM condition is marked fatal (fatal=1,
 * no stat counter). */
1895 * CIM interrupt handler.
1897 static void cim_intr_handler(adapter_t *adapter)
1899 static struct intr_info cim_intr_info[] = {
1900 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1901 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1902 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1903 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1904 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1905 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1906 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1907 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1908 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1909 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1910 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1911 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1912 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1913 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1914 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1915 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1916 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1917 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1918 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1919 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1920 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1921 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1922 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1923 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1927 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1928 cim_intr_info, NULL))
1929 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. */
1933 * ULP RX interrupt handler.
1935 static void ulprx_intr_handler(adapter_t *adapter)
1937 static struct intr_info ulprx_intr_info[] = {
1938 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1939 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1940 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1941 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1942 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1943 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1944 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1945 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1949 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1950 ulprx_intr_info, NULL))
1951 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. PBL out-of-bounds events are counted (non-fatal); the
 * 0xfc parity bits are fatal. The ICSPI/OESPI macros below aggregate the
 * PM TX framing-error bits used by pmtx_intr_handler(). */
1955 * ULP TX interrupt handler.
1957 static void ulptx_intr_handler(adapter_t *adapter)
1959 static struct intr_info ulptx_intr_info[] = {
1960 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1961 STAT_ULP_CH0_PBL_OOB, 0 },
1962 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1963 STAT_ULP_CH1_PBL_OOB, 0 },
1964 { 0xfc, "ULP TX parity error", -1, 1 },
1968 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1969 ulptx_intr_info, adapter->irq_stats))
1970 t3_fatal_err(adapter);
1973 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1974 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1975 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1976 F_ICSPI1_TX_FRAMING_ERROR)
1977 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1978 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1979 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1980 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. The IESPI/OCSPI macros below are the PM RX analogue of
 * ICSPI/OESPI, used by pmrx_intr_handler(). */
1983 * PM TX interrupt handler.
1985 static void pmtx_intr_handler(adapter_t *adapter)
1987 static struct intr_info pmtx_intr_info[] = {
1988 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1989 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1990 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1991 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1992 "PMTX ispi parity error", -1, 1 },
1993 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1994 "PMTX ospi parity error", -1, 1 },
1998 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1999 pmtx_intr_info, NULL))
2000 t3_fatal_err(adapter);
2003 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2004 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2005 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2006 F_IESPI1_TX_FRAMING_ERROR)
2007 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2008 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2009 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2010 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. */
2013 * PM RX interrupt handler.
2015 static void pmrx_intr_handler(adapter_t *adapter)
2017 static struct intr_info pmrx_intr_info[] = {
2018 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2019 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2020 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2021 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2022 "PMRX ispi parity error", -1, 1 },
2023 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2024 "PMRX ospi parity error", -1, 1 },
2028 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2029 pmrx_intr_info, NULL))
2030 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. */
2034 * CPL switch interrupt handler.
2036 static void cplsw_intr_handler(adapter_t *adapter)
2038 static struct intr_info cplsw_intr_info[] = {
2039 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2040 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2041 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2042 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2043 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2044 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2048 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2049 cplsw_intr_info, NULL))
2050 t3_fatal_err(adapter);
/* NOTE(review): fragment — table terminator and braces elided; code kept
 * byte-identical. MC7_INTR_FATAL below selects the MC7 causes treated as
 * fatal by mc7_intr_handler() (uncorrectable, parity, address errors —
 * correctable F_CE is excluded). */
2054 * MPS interrupt handler.
2056 static void mps_intr_handler(adapter_t *adapter)
2058 static struct intr_info mps_intr_info[] = {
2059 { 0x1ff, "MPS parity error", -1, 1 },
2063 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2064 mps_intr_info, NULL))
2065 t3_fatal_err(adapter);
2068 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
/* NOTE(review): fragment — the cause-bit tests guarding each branch and
 * several braces are elided; code kept byte-identical. Per visible code:
 * correctable errors log CE address/data, uncorrectable log UE
 * address/data, parity logs G_PE(cause); the address-error register is only
 * valid on rev > 0 parts. The cause register is written back to ack. */
2071 * MC7 interrupt handler.
2073 static void mc7_intr_handler(struct mc7 *mc7)
2075 adapter_t *adapter = mc7->adapter;
2076 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2079 mc7->stats.corr_err++;
2080 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2081 "data 0x%x 0x%x 0x%x\n", mc7->name,
2082 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2083 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2084 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2085 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2089 mc7->stats.uncorr_err++;
2090 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2091 "data 0x%x 0x%x 0x%x\n", mc7->name,
2092 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2093 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2094 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2095 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2099 mc7->stats.parity_err++;
2100 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2101 mc7->name, G_PE(cause));
2107 if (adapter->params.rev > 0)
2108 addr = t3_read_reg(adapter,
2109 mc7->offset + A_MC7_ERR_ADDR);
2110 mc7->stats.addr_err++;
2111 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2115 if (cause & MC7_INTR_FATAL)
2116 t3_fatal_err(adapter);
2118 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2124 * XGMAC interrupt handler.
2126 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2129 struct port_info *pi;
2132 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2133 pi = adap2pinfo(adap, idx);
2137 * We mask out interrupt causes for which we're not taking interrupts.
2138 * This allows us to use polling logic to monitor some of the other
2139 * conditions when taking interrupts would impose too much load on the
2142 cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2143 & ~(F_RXFIFO_OVERFLOW));
2145 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2146 mac->stats.tx_fifo_parity_err++;
2147 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2149 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2150 mac->stats.rx_fifo_parity_err++;
2151 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2153 if (cause & F_TXFIFO_UNDERRUN)
2154 mac->stats.tx_fifo_urun++;
2155 if (cause & F_RXFIFO_OVERFLOW)
2156 mac->stats.rx_fifo_ovfl++;
2157 if (cause & V_SERDES_LOS(M_SERDES_LOS))
2158 mac->stats.serdes_signal_loss++;
2159 if (cause & F_XAUIPCSCTCERR)
2160 mac->stats.xaui_pcs_ctc_err++;
2161 if (cause & F_XAUIPCSALIGNCHANGE)
2162 mac->stats.xaui_pcs_align_change++;
2163 if (cause & F_XGM_INT) {
2164 t3_set_reg_field(adap,
2165 A_XGM_INT_ENABLE + mac->offset,
2168 /* link fault suspected */
2169 pi->link_fault = LF_MAYBE;
2172 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2174 if (cause & XGM_INTR_FATAL)
/* NOTE(review): fragment — braces, continue, and return are elided; code
 * kept byte-identical. For each port with an IRQ-capable PHY whose GPIO
 * cause bit is set, dispatch the PHY's own handler and fan out the returned
 * cphy_cause_* bits; finally ack the debug-interrupt cause register. */
2181 * Interrupt handler for PHY events.
2183 int t3_phy_intr_handler(adapter_t *adapter)
2185 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2187 for_each_port(adapter, i) {
2188 struct port_info *p = adap2pinfo(adapter, i);
2190 if (!(p->phy.caps & SUPPORTED_IRQ))
2193 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2194 int phy_cause = p->phy.ops->intr_handler(&p->phy);
2196 if (phy_cause & cphy_cause_link_change)
2197 t3_link_changed(adapter, i);
2198 if (phy_cause & cphy_cause_fifo_error)
2199 p->phy.fifo_errors++;
2200 if (phy_cause & cphy_cause_module_change)
2201 t3_os_phymod_changed(adapter, i);
2202 if (phy_cause & cphy_cause_alarm)
2203 CH_WARN(adapter, "Operation affected due to "
2204 "adverse environment. Check the spec "
2205 "sheet for corrective action.");
2209 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
/* NOTE(review): fragment — several `if (cause & ...)` guards (e.g. for the
 * SGE, CIM, TP, MPS and MC5 dispatches) and the return are elided; code
 * kept byte-identical. Reads PL_INT_CAUSE0, masks by slow_intr_mask,
 * dispatches to each module handler, then writes back and reads to flush
 * the ack over the bus. */
2214 * t3_slow_intr_handler - control path interrupt handler
2215 * @adapter: the adapter
2217 * T3 interrupt handler for non-data interrupt events, e.g., errors.
2218 * The designation 'slow' is because it involves register reads, while
2219 * data interrupts typically don't involve any MMIOs.
2221 int t3_slow_intr_handler(adapter_t *adapter)
2223 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2225 cause &= adapter->slow_intr_mask;
2228 if (cause & F_PCIM0) {
2229 if (is_pcie(adapter))
2230 pcie_intr_handler(adapter);
2232 pci_intr_handler(adapter);
2235 t3_sge_err_intr_handler(adapter);
2236 if (cause & F_MC7_PMRX)
2237 mc7_intr_handler(&adapter->pmrx);
2238 if (cause & F_MC7_PMTX)
2239 mc7_intr_handler(&adapter->pmtx);
2240 if (cause & F_MC7_CM)
2241 mc7_intr_handler(&adapter->cm);
2243 cim_intr_handler(adapter);
2245 tp_intr_handler(adapter);
2246 if (cause & F_ULP2_RX)
2247 ulprx_intr_handler(adapter);
2248 if (cause & F_ULP2_TX)
2249 ulptx_intr_handler(adapter);
2250 if (cause & F_PM1_RX)
2251 pmrx_intr_handler(adapter);
2252 if (cause & F_PM1_TX)
2253 pmtx_intr_handler(adapter);
2254 if (cause & F_CPL_SWITCH)
2255 cplsw_intr_handler(adapter);
2257 mps_intr_handler(adapter);
2259 t3_mc5_intr_handler(&adapter->mc5);
2260 if (cause & F_XGMAC0_0)
2261 mac_intr_handler(adapter, 0);
2262 if (cause & F_XGMAC0_1)
2263 mac_intr_handler(adapter, 1);
2264 if (cause & F_T3DBG)
2265 t3_os_ext_intr_handler(adapter);
2267 /* Clear the interrupts just processed. */
2268 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2269 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/* NOTE(review): fragment — braces and return elided; code kept
 * byte-identical. Builds the GPIO interrupt-enable bitmap from each
 * IRQ-capable port's gpio_intr assignment (a zero assignment means no GPIO
 * line, hence the second condition). */
2273 static unsigned int calc_gpio_intr(adapter_t *adap)
2275 unsigned int i, gpi_intr = 0;
2277 for_each_port(adap, i)
2278 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2279 adapter_info(adap)->gpio_intr[i])
2280 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
/* NOTE(review): fragment — table terminator, braces and an else are
 * elided; code kept byte-identical. Bulk-programs the per-module enables
 * via t3_write_regs(), selects the TP enable mask by chip revision, adds
 * the ULP TX PBL and CPL overflow bits only on rev > 0 parts, picks
 * PCIe vs. PCI-X by bus type, then unmasks the top level and flushes. */
2285 * t3_intr_enable - enable interrupts
2286 * @adapter: the adapter whose interrupts should be enabled
2288 * Enable interrupts by setting the interrupt enable registers of the
2289 * various HW modules and then enabling the top-level interrupt
2292 void t3_intr_enable(adapter_t *adapter)
2294 static struct addr_val_pair intr_en_avp[] = {
2295 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
2296 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2298 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2300 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2301 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2302 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2303 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2304 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2305 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
2308 adapter->slow_intr_mask = PL_INTR_MASK;
2310 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2311 t3_write_reg(adapter, A_TP_INT_ENABLE,
2312 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2313 t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2315 if (adapter->params.rev > 0) {
2316 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2317 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2318 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2319 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2320 F_PBL_BOUND_ERR_CH1);
2322 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2323 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2326 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2328 if (is_pcie(adapter))
2329 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2331 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2332 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2333 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2337 * t3_intr_disable - disable a card's interrupts
2338 * @adapter: the adapter whose interrupts should be disabled
2340 * Disable interrupts. We only disable the top-level interrupt
2341 * concentrator and the SGE data interrupts.
2343 void t3_intr_disable(adapter_t *adapter)
2345 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2346 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2347 adapter->slow_intr_mask = 0;
2351 * t3_intr_clear - clear all interrupts
2352 * @adapter: the adapter whose interrupts should be cleared
2354 * Clears all interrupts.
2356 void t3_intr_clear(adapter_t *adapter)
2358 static const unsigned int cause_reg_addr[] = {
2360 A_SG_RSPQ_FL_STATUS,
2363 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2364 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2365 A_CIM_HOST_INT_CAUSE,
2378 /* Clear PHY and MAC interrupts for each port. */
2379 for_each_port(adapter, i)
2380 t3_port_intr_clear(adapter, i);
2382 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2383 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2385 if (is_pcie(adapter))
2386 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2387 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2388 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2391 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2393 struct port_info *pi = adap2pinfo(adapter, idx);
2395 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2396 XGM_EXTRA_INTR_MASK);
2399 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2401 struct port_info *pi = adap2pinfo(adapter, idx);
2403 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2408 * t3_port_intr_enable - enable port-specific interrupts
2409 * @adapter: associated adapter
2410 * @idx: index of port whose interrupts should be enabled
2412 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2415 void t3_port_intr_enable(adapter_t *adapter, int idx)
2417 struct port_info *pi = adap2pinfo(adapter, idx);
2419 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2420 pi->phy.ops->intr_enable(&pi->phy);
2424 * t3_port_intr_disable - disable port-specific interrupts
2425 * @adapter: associated adapter
2426 * @idx: index of port whose interrupts should be disabled
2428 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2431 void t3_port_intr_disable(adapter_t *adapter, int idx)
2433 struct port_info *pi = adap2pinfo(adapter, idx);
2435 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2436 pi->phy.ops->intr_disable(&pi->phy);
2440 * t3_port_intr_clear - clear port-specific interrupts
2441 * @adapter: associated adapter
2442 * @idx: index of port whose interrupts to clear
2444 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2447 void t3_port_intr_clear(adapter_t *adapter, int idx)
2449 struct port_info *pi = adap2pinfo(adapter, idx);
2451 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2452 pi->phy.ops->intr_clear(&pi->phy);
2455 #define SG_CONTEXT_CMD_ATTEMPTS 100
2458 * t3_sge_write_context - write an SGE context
2459 * @adapter: the adapter
2460 * @id: the context id
2461 * @type: the context type
2463 * Program an SGE context with the values already loaded in the
2464 * CONTEXT_DATA? registers.
2466 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2469 if (type == F_RESPONSEQ) {
2471 * Can't write the Response Queue Context bits for
2472 * Interrupt Armed or the Reserve bits after the chip
2473 * has been initialized out of reset. Writing to these
2474 * bits can confuse the hardware.
2476 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2477 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2478 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2479 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2481 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2482 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2483 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2484 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2486 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2487 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2488 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2489 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2493 * clear_sge_ctxt - completely clear an SGE context
2494 * @adapter: the adapter
2495 * @id: the context id
2496 * @type: the context type
2498 * Completely clear an SGE context. Used predominantly at post-reset
2499 * initialization. Note in particular that we don't skip writing to any
2500 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2503 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2505 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2506 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2507 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2508 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2509 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2510 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2511 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2512 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2513 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2514 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2515 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2516 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2520 * t3_sge_init_ecntxt - initialize an SGE egress context
2521 * @adapter: the adapter to configure
2522 * @id: the context id
2523 * @gts_enable: whether to enable GTS for the context
2524 * @type: the egress context type
2525 * @respq: associated response queue
2526 * @base_addr: base address of queue
2527 * @size: number of queue entries
2529 * @gen: initial generation value for the context
2530 * @cidx: consumer pointer
2532 * Initialize an SGE egress context and make it ready for use. If the
2533 * platform allows concurrent context operations, the caller is
2534 * responsible for appropriate locking.
2536 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2537 enum sge_context_type type, int respq, u64 base_addr,
2538 unsigned int size, unsigned int token, int gen,
2541 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2543 if (base_addr & 0xfff) /* must be 4K aligned */
2545 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2549 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2550 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2551 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2552 V_EC_BASE_LO((u32)base_addr & 0xffff));
2554 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2556 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2557 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2558 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2560 return t3_sge_write_context(adapter, id, F_EGRESS);
2564 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2565 * @adapter: the adapter to configure
2566 * @id: the context id
2567 * @gts_enable: whether to enable GTS for the context
2568 * @base_addr: base address of queue
2569 * @size: number of queue entries
2570 * @bsize: size of each buffer for this queue
2571 * @cong_thres: threshold to signal congestion to upstream producers
2572 * @gen: initial generation value for the context
2573 * @cidx: consumer pointer
2575 * Initialize an SGE free list context and make it ready for use. The
2576 * caller is responsible for ensuring only one context operation occurs
2579 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2580 u64 base_addr, unsigned int size, unsigned int bsize,
2581 unsigned int cong_thres, int gen, unsigned int cidx)
2583 if (base_addr & 0xfff) /* must be 4K aligned */
2585 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2589 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2591 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2592 V_FL_BASE_HI((u32)base_addr) |
2593 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2594 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2595 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2596 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2597 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2598 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2599 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2600 return t3_sge_write_context(adapter, id, F_FREELIST);
2604 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2605 * @adapter: the adapter to configure
2606 * @id: the context id
2607 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2608 * @base_addr: base address of queue
2609 * @size: number of queue entries
2610 * @fl_thres: threshold for selecting the normal or jumbo free list
2611 * @gen: initial generation value for the context
2612 * @cidx: consumer pointer
2614 * Initialize an SGE response queue context and make it ready for use.
2615 * The caller is responsible for ensuring only one context operation
2618 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2619 u64 base_addr, unsigned int size,
2620 unsigned int fl_thres, int gen, unsigned int cidx)
2622 unsigned int ctrl, intr = 0;
2624 if (base_addr & 0xfff) /* must be 4K aligned */
2626 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2630 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2632 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2634 ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2635 if ((irq_vec_idx > 0) ||
2636 ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2637 intr = F_RQ_INTR_EN;
2638 if (irq_vec_idx >= 0)
2639 intr |= V_RQ_MSI_VEC(irq_vec_idx);
2640 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2641 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2642 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2643 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2647 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2648 * @adapter: the adapter to configure
2649 * @id: the context id
2650 * @base_addr: base address of queue
2651 * @size: number of queue entries
2652 * @rspq: response queue for async notifications
2653 * @ovfl_mode: CQ overflow mode
2654 * @credits: completion queue credits
2655 * @credit_thres: the credit threshold
2657 * Initialize an SGE completion queue context and make it ready for use.
2658 * The caller is responsible for ensuring only one context operation
2661 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2662 unsigned int size, int rspq, int ovfl_mode,
2663 unsigned int credits, unsigned int credit_thres)
2665 if (base_addr & 0xfff) /* must be 4K aligned */
2667 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2671 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2672 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2674 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2675 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2676 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2677 V_CQ_ERR(ovfl_mode));
2678 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2679 V_CQ_CREDIT_THRES(credit_thres));
2680 return t3_sge_write_context(adapter, id, F_CQ);
2684 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2685 * @adapter: the adapter
2686 * @id: the egress context id
2687 * @enable: enable (1) or disable (0) the context
2689 * Enable or disable an SGE egress context. The caller is responsible for
2690 * ensuring only one context operation occurs at a time.
2692 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2694 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2697 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2698 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2699 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2700 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2701 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2702 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2703 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2704 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2705 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2709 * t3_sge_disable_fl - disable an SGE free-buffer list
2710 * @adapter: the adapter
2711 * @id: the free list context id
2713 * Disable an SGE free-buffer list. The caller is responsible for
2714 * ensuring only one context operation occurs at a time.
2716 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2718 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2721 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2722 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2723 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2724 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2725 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2726 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2727 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2728 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2729 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2733 * t3_sge_disable_rspcntxt - disable an SGE response queue
2734 * @adapter: the adapter
2735 * @id: the response queue context id
2737 * Disable an SGE response queue. The caller is responsible for
2738 * ensuring only one context operation occurs at a time.
2740 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2742 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2745 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2746 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2747 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2748 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2749 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2750 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2751 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2752 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2753 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2757 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2758 * @adapter: the adapter
2759 * @id: the completion queue context id
2761 * Disable an SGE completion queue. The caller is responsible for
2762 * ensuring only one context operation occurs at a time.
2764 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2766 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2769 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2770 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2771 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2772 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2773 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2774 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2775 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2776 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2777 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2781 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2782 * @adapter: the adapter
2783 * @id: the context id
2784 * @op: the operation to perform
2785 * @credits: credits to return to the CQ
2787 * Perform the selected operation on an SGE completion queue context.
2788 * The caller is responsible for ensuring only one context operation
2791 * For most operations the function returns the current HW position in
2792 * the completion queue.
2794 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2795 unsigned int credits)
2799 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2802 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2803 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2804 V_CONTEXT(id) | F_CQ);
2805 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2806 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2809 if (op >= 2 && op < 7) {
2810 if (adapter->params.rev > 0)
2811 return G_CQ_INDEX(val);
2813 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2814 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2815 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2816 F_CONTEXT_CMD_BUSY, 0,
2817 SG_CONTEXT_CMD_ATTEMPTS, 1))
2819 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2825 * t3_sge_read_context - read an SGE context
2826 * @type: the context type
2827 * @adapter: the adapter
2828 * @id: the context id
2829 * @data: holds the retrieved context
2831 * Read an SGE egress context. The caller is responsible for ensuring
2832 * only one context operation occurs at a time.
2834 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2835 unsigned int id, u32 data[4])
2837 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2840 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2841 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2842 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2843 SG_CONTEXT_CMD_ATTEMPTS, 1))
2845 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2846 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2847 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2848 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2853 * t3_sge_read_ecntxt - read an SGE egress context
2854 * @adapter: the adapter
2855 * @id: the context id
2856 * @data: holds the retrieved context
2858 * Read an SGE egress context. The caller is responsible for ensuring
2859 * only one context operation occurs at a time.
2861 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2865 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2869 * t3_sge_read_cq - read an SGE CQ context
2870 * @adapter: the adapter
2871 * @id: the context id
2872 * @data: holds the retrieved context
2874 * Read an SGE CQ context. The caller is responsible for ensuring
2875 * only one context operation occurs at a time.
2877 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2881 return t3_sge_read_context(F_CQ, adapter, id, data);
2885 * t3_sge_read_fl - read an SGE free-list context
2886 * @adapter: the adapter
2887 * @id: the context id
2888 * @data: holds the retrieved context
2890 * Read an SGE free-list context. The caller is responsible for ensuring
2891 * only one context operation occurs at a time.
2893 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2895 if (id >= SGE_QSETS * 2)
2897 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2901 * t3_sge_read_rspq - read an SGE response queue context
2902 * @adapter: the adapter
2903 * @id: the context id
2904 * @data: holds the retrieved context
2906 * Read an SGE response queue context. The caller is responsible for
2907 * ensuring only one context operation occurs at a time.
2909 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2911 if (id >= SGE_QSETS)
2913 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2917 * t3_config_rss - configure Rx packet steering
2918 * @adapter: the adapter
2919 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2920 * @cpus: values for the CPU lookup table (0xff terminated)
2921 * @rspq: values for the response queue lookup table (0xffff terminated)
2923 * Programs the receive packet steering logic. @cpus and @rspq provide
2924 * the values for the CPU and response queue lookup tables. If they
2925 * provide fewer values than the size of the tables the supplied values
2926 * are used repeatedly until the tables are fully populated.
2928 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2931 int i, j, cpu_idx = 0, q_idx = 0;
2934 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2937 for (j = 0; j < 2; ++j) {
2938 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2939 if (cpus[cpu_idx] == 0xff)
2942 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2946 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2947 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2948 (i << 16) | rspq[q_idx++]);
2949 if (rspq[q_idx] == 0xffff)
2953 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2957 * t3_read_rss - read the contents of the RSS tables
2958 * @adapter: the adapter
2959 * @lkup: holds the contents of the RSS lookup table
2960 * @map: holds the contents of the RSS map table
2962 * Reads the contents of the receive packet steering tables.
2964 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2970 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2971 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2973 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2974 if (!(val & 0x80000000))
2977 *lkup++ = (u8)(val >> 8);
2981 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2982 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2984 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2985 if (!(val & 0x80000000))
2993 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2994 * @adap: the adapter
2995 * @enable: 1 to select offload mode, 0 for regular NIC
2997 * Switches TP to NIC/offload mode.
2999 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3001 if (is_offload(adap) || !enable)
3002 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3003 V_NICMODE(!enable));
3007 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
3008 * @adap: the adapter
3009 * @addr: the indirect TP register address
3010 * @mask: specifies the field within the register to modify
3011 * @val: new value for the field
3013 * Sets a field of an indirect TP register to the given value.
3015 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3016 unsigned int mask, unsigned int val)
3018 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3019 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3020 t3_write_reg(adap, A_TP_PIO_DATA, val);
3024 * t3_enable_filters - enable the HW filters
3025 * @adap: the adapter
3027 * Enables the HW filters for NIC traffic.
3029 void t3_enable_filters(adapter_t *adap)
3031 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3032 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3033 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3034 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3038 * t3_disable_filters - disable the HW filters
3039 * @adap: the adapter
3041 * Disables the HW filters for NIC traffic.
3043 void t3_disable_filters(adapter_t *adap)
3045 /* note that we don't want to revert to NIC-only mode */
3046 t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3047 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3048 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3049 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;   /* round down to a multiple of 24 */
}
/*
 * Program base address register A_<reg> with the current allocation cursor
 * and advance the cursor past this region.  Note: deliberately NOT a
 * do-while(0) macro — it both writes the register and mutates @start.
 */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
3074 * partition_mem - partition memory and configure TP memory settings
3075 * @adap: the adapter
3076 * @p: the TP parameters
3078 * Partitions context and payload memory and configures TP's memory
3081 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3083 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3084 unsigned int timers = 0, timers_shift = 22;
3086 if (adap->params.rev > 0) {
3087 if (tids <= 16 * 1024) {
3090 } else if (tids <= 64 * 1024) {
3093 } else if (tids <= 256 * 1024) {
3099 t3_write_reg(adap, A_TP_PMM_SIZE,
3100 p->chan_rx_size | (p->chan_tx_size >> 16));
3102 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3103 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3104 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3105 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3106 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3108 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3109 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3110 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3112 pstructs = p->rx_num_pgs + p->tx_num_pgs;
3113 /* Add a bit of headroom and make multiple of 24 */
3115 pstructs -= pstructs % 24;
3116 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3118 m = tids * TCB_SIZE;
3119 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3120 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3121 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3122 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3123 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3124 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3125 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3126 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3128 m = (m + 4095) & ~0xfff;
3129 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3130 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3132 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3133 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3134 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3136 adap->params.mc5.nservers += m - tids;
3139 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3141 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3142 t3_write_reg(adap, A_TP_PIO_DATA, val);
3145 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3147 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3148 return t3_read_reg(adap, A_TP_PIO_DATA);
3151 static void tp_config(adapter_t *adap, const struct tp_params *p)
3153 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3154 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3155 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3156 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3157 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3158 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3159 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3160 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3161 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3162 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3163 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3164 F_IPV6ENABLE | F_NICMODE);
3165 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3166 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3167 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3168 adap->params.rev > 0 ? F_ENABLEESND :
3170 t3_set_reg_field(adap, A_TP_PC_CONFIG,
3172 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
3173 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3174 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3175 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3176 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3177 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3178 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3180 if (adap->params.rev > 0) {
3181 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3182 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3183 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3184 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3185 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3186 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3187 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3189 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3191 if (adap->params.rev == T3_REV_C)
3192 t3_set_reg_field(adap, A_TP_PC_CONFIG,
3193 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3194 V_TABLELATENCYDELTA(4));
3196 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3197 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3198 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3199 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3201 if (adap->params.nports > 2) {
3202 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3203 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3204 F_ENABLERXPORTFROMADDR);
3205 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3206 V_RXMAPMODE(M_RXMAPMODE), 0);
3207 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3208 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3209 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3210 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3211 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3212 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3213 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3217 /* TCP timer values in ms */
3218 #define TP_DACK_TIMER 50
3219 #define TP_RTO_MIN 250
3222 * tp_set_timers - set TP timing parameters
3223 * @adap: the adapter to set
3224 * @core_clk: the core clock frequency in Hz
3226 * Set TP's timing parameters, such as the various timer resolutions and
3227 * the TCP timer values.
3229 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3231 unsigned int tre = adap->params.tp.tre;
3232 unsigned int dack_re = adap->params.tp.dack_re;
3233 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps = TP timer ticks per second given the timer resolution shift tre */
3234 unsigned int tps = core_clk >> tre;
3236 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3237 V_DELAYEDACKRESOLUTION(dack_re) |
3238 V_TIMESTAMPRESOLUTION(tstamp_re));
/* delayed-ACK timer: ticks/sec at the dack resolution scaled to TP_DACK_TIMER ms */
3239 t3_write_reg(adap, A_TP_DACK_TIMER,
3240 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* TCP backoff table entries 0x0..0xf, packed one byte per entry, four per register */
3241 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3242 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3243 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3244 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3245 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3246 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3247 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/*
 * SECONDS is a textual operator macro: "2 SECONDS" expands to "2 * tps",
 * i.e. a count of seconds expressed in TP timer ticks.
 */
3250 #define SECONDS * tps
3252 t3_write_reg(adap, A_TP_MSL,
3253 adap->params.rev > 0 ? 0 : 2 SECONDS);
/* minimum RTO: tps ticks/sec scaled down to TP_RTO_MIN milliseconds */
3254 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3255 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3256 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3257 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3258 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3259 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3260 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3261 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3267 * t3_tp_set_coalescing_size - set receive coalescing size
3268 * @adap: the adapter
3269 * @size: the receive coalescing size
3270 * @psh: whether a set PSH bit should deliver coalesced data
3272 * Set the receive coalescing size and PSH bit handling.
3274 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
/* NOTE(review): the guard body (error return for oversize requests) is elided in this excerpt */
3278 if (size > MAX_RX_COALESCING_LEN)
/* read-modify-write PARA_REG3: clear both coalescing control bits first */
3281 val = t3_read_reg(adap, A_TP_PARA_REG3);
3282 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3285 val |= F_RXCOALESCEENABLE;
3287 val |= F_RXCOALESCEPSHEN;
/* clamp again defensively before programming the size register */
3288 size = min(MAX_RX_COALESCING_LEN, size);
3289 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3290 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3292 t3_write_reg(adap, A_TP_PARA_REG3, val);
3297 * t3_tp_set_max_rxsize - set the max receive size
3298 * @adap: the adapter
3299 * @size: the max receive size
3301 * Set TP's max receive size. This is the limit that applies when
3302 * receive coalescing is disabled.
3304 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
/* program the same limit for both PM transfer-length fields */
3306 t3_write_reg(adap, A_TP_PARA_REG7,
3307 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* init_mtus - populate the default MTU table (the table entries themselves are elided in this excerpt) */
3310 static void __devinit init_mtus(unsigned short mtus[])
3313 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
3314 * it can accommodate max size TCP/IP headers when SACK and timestamps
3315 * are enabled and still have at least 8 bytes of payload.
3336 * init_cong_ctrl - initialize congestion control parameters
3337 * @a: the alpha values for congestion control
3338 * @b: the beta values for congestion control
3340 * Initialize the congestion control parameters.
3342 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
/* alpha starts at 1 for the smallest windows (remaining entries elided in this excerpt) */
3344 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
/* beta grows stepwise with the window index */
3369 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3372 b[13] = b[14] = b[15] = b[16] = 3;
3373 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3374 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3379 /* The minimum additive increment value for the congestion control table */
3380 #define CC_MIN_INCR 2U
3383 * t3_load_mtus - write the MTU and congestion control HW tables
3384 * @adap: the adapter
3385 * @mtus: the unrestricted values for the MTU table
3386 * @alpha: the values for the congestion control alpha parameter
3387 * @beta: the values for the congestion control beta parameter
3388 * @mtu_cap: the maximum permitted effective MTU
3390 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
3391 * Update the high-speed congestion control table with the supplied alpha,
3394 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3395 unsigned short alpha[NCCTRL_WIN],
3396 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* average packets per congestion window, indexed by window size class */
3398 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3399 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3400 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3401 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3405 for (i = 0; i < NMTUS; ++i) {
3406 unsigned int mtu = min(mtus[i], mtu_cap);
/* log2 is the position of the MTU's highest set bit */
3407 unsigned int log2 = fls(mtu);
/* round log2 down if the MTU is not close to the next power of two */
3409 if (!(mtu & ((1 << log2) >> 2))) /* round */
/* MTU table entry: index in bits 31:24, log2 in 23:16, MTU value below */
3411 t3_write_reg(adap, A_TP_MTU_TABLE,
3412 (i << 24) | (log2 << 16) | mtu);
3414 for (w = 0; w < NCCTRL_WIN; ++w) {
/* additive increment scaled by alpha over the average packet count;
 * (mtu - 40) subtracts the TCP/IP header overhead — presumably; confirm */
3417 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3420 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3421 (w << 16) | (beta[w] << 13) | inc);
3427 * t3_read_hw_mtus - returns the values in the HW MTU table
3428 * @adap: the adapter
3429 * @mtus: where to store the HW MTU values
3431 * Reads the HW MTU table.
3433 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3437 for (i = 0; i < NMTUS; ++i) {
/* 0xff000000 selects read mode; the low bits select the table index */
3440 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3441 val = t3_read_reg(adap, A_TP_MTU_TABLE);
/* MTU value occupies the low 14 bits of the table entry */
3442 mtus[i] = val & 0x3fff;
3447 * t3_get_cong_cntl_tab - reads the congestion control table
3448 * @adap: the adapter
3449 * @incr: where to store the alpha values
3451 * Reads the additive increments programmed into the HW congestion
3454 void t3_get_cong_cntl_tab(adapter_t *adap,
3455 unsigned short incr[NMTUS][NCCTRL_WIN])
3457 unsigned int mtu, w;
3459 for (mtu = 0; mtu < NMTUS; ++mtu)
3460 for (w = 0; w < NCCTRL_WIN; ++w) {
/* 0xffff0000 selects read mode; address = (mtu index << 5) | window */
3461 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3462 0xffff0000 | (mtu << 5) | w);
/* the additive increment is the low 13 bits of the entry */
3463 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3464 A_TP_CCTRL_TABLE) & 0x1fff;
3469 * t3_tp_get_mib_stats - read TP's MIB counters
3470 * @adap: the adapter
3471 * @tps: holds the returned counter values
3473 * Returns the values of TP's MIB counters.
3475 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
/* bulk-read the whole counter block; tp_mib_stats is treated as an array of u32 */
3477 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3478 sizeof(*tps) / sizeof(u32), 0);
3482 * t3_read_pace_tbl - read the pace table
3483 * @adap: the adapter
3484 * @pace_vals: holds the returned values
3486 * Returns the values of TP's pace table in nanoseconds.
3488 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
/* tick_ns: duration of one pace-table tick in nanoseconds */
3490 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3492 for (i = 0; i < NTX_SCHED; i++) {
/* 0xffff0000 selects read mode; low bits select the table entry */
3493 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3494 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3499 * t3_set_pace_tbl - set the pace table
3500 * @adap: the adapter
3501 * @pace_vals: the pace values in nanoseconds
3502 * @start: index of the first entry in the HW pace table to set
3503 * @n: how many entries to set
3505 * Sets (a subset of the) HW pace table.
3507 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3508 unsigned int start, unsigned int n)
3510 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3512 for ( ; n; n--, start++, pace_vals++)
/* convert ns to ticks, rounding to nearest (+ tick_ns / 2) */
3513 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3514 ((*pace_vals + tick_ns / 2) / tick_ns));
/* program the lower/upper limits of a ULP RX memory region; the macro also
 * advances the running offset (the advancing statement is elided in this excerpt) */
3517 #define ulp_region(adap, name, start, len) \
3518 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3519 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3520 (start) + (len) - 1); \
/* same as ulp_region but for the ULP TX limit registers */
3523 #define ulptx_region(adap, name, start, len) \
3524 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3525 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3526 (start) + (len) - 1)
/* ulp_config - carve the per-channel RX memory into the ULP regions */
3528 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3530 unsigned int m = p->chan_rx_size;
3532 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3533 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3534 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3535 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3536 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
/* PBL is programmed on both the TX and RX sides */
3537 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3538 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3539 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3544 * t3_set_proto_sram - set the contents of the protocol sram
3545 * @adapter: the adapter
3546 * @data: the protocol image
3548 * Write the contents of the protocol SRAM.
3550 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3553 const u32 *buf = (const u32 *)data;
3555 for (i = 0; i < PROTO_SRAM_LINES; i++) {
/* each SRAM line is five 32-bit words, loaded FIELD5 down to FIELD1 */
3556 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3557 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3558 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3559 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3560 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
/* FIELD0 write triggers the line store: line index in bits 30:1, go bit 31 */
3562 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3563 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3570 * t3_config_trace_filter - configure one of the tracing filters
3571 * @adapter: the adapter
3572 * @tp: the desired trace filter parameters
3573 * @filter_index: which filter to configure
3574 * @invert: if set non-matching packets are traced instead of matching ones
3575 * @enable: whether to enable or disable the filter
3577 * Configures one of the tracing filters available in HW.
3579 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3580 int filter_index, int invert, int enable)
3582 u32 addr, key[4], mask[4];
/* pack the match key: sport/sip/dport span key[0..1]; key[2] (dip) is elided
 * in this excerpt; proto/vlan/intf share key[3] */
3584 key[0] = tp->sport | (tp->sip << 16);
3585 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3587 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
/* masks are packed identically to the key words */
3589 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3590 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3591 mask[2] = tp->dip_mask;
3592 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* bit 29 = invert, bit 28 = enable (their guarding ifs are elided here) */
3595 key[3] |= (1 << 29);
3597 key[3] |= (1 << 28);
/* filter 0 is the TX tracer, filter 1 the RX tracer */
3599 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3600 tp_wr_indirect(adapter, addr++, key[0]);
3601 tp_wr_indirect(adapter, addr++, mask[0]);
3602 tp_wr_indirect(adapter, addr++, key[1]);
3603 tp_wr_indirect(adapter, addr++, mask[1]);
3604 tp_wr_indirect(adapter, addr++, key[2]);
3605 tp_wr_indirect(adapter, addr++, mask[2]);
3606 tp_wr_indirect(adapter, addr++, key[3]);
3607 tp_wr_indirect(adapter, addr, mask[3]);
/* read back to flush the posted indirect writes */
3608 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3612 * t3_query_trace_filter - query a tracing filter
3613 * @adapter: the adapter
3614 * @tp: the current trace filter parameters
3615 * @filter_index: which filter to query
3616 * @inverted: non-zero if the filter is inverted
3617 * @enabled: non-zero if the filter is enabled
3619 * Returns the current settings of the specified HW tracing filter.
3621 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3622 int filter_index, int *inverted, int *enabled)
3624 u32 addr, key[4], mask[4];
/* read back the key/mask words in the same layout t3_config_trace_filter writes */
3626 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3627 key[0] = tp_rd_indirect(adapter, addr++);
3628 mask[0] = tp_rd_indirect(adapter, addr++);
3629 key[1] = tp_rd_indirect(adapter, addr++);
3630 mask[1] = tp_rd_indirect(adapter, addr++);
3631 key[2] = tp_rd_indirect(adapter, addr++);
3632 mask[2] = tp_rd_indirect(adapter, addr++);
3633 key[3] = tp_rd_indirect(adapter, addr++);
3634 mask[3] = tp_rd_indirect(adapter, addr);
/* unpack the fields (the dip extraction from key[1..2] is elided in this excerpt) */
3636 tp->sport = key[0] & 0xffff;
3637 tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3638 tp->dport = key[1] >> 16;
3640 tp->proto = key[3] & 0xff;
3641 tp->vlan = key[3] >> 8;
3642 tp->intf = key[3] >> 20;
3644 tp->sport_mask = mask[0] & 0xffff;
3645 tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3646 tp->dport_mask = mask[1] >> 16;
3647 tp->dip_mask = mask[2];
3648 tp->proto_mask = mask[3] & 0xff;
3649 tp->vlan_mask = mask[3] >> 8;
3650 tp->intf_mask = mask[3] >> 20;
/* control bits mirror the writer: bit 29 = invert, bit 28 = enable */
3652 *inverted = key[3] & (1 << 29);
3653 *enabled = key[3] & (1 << 28);
3657 * t3_config_sched - configure a HW traffic scheduler
3658 * @adap: the adapter
3659 * @kbps: target rate in Kbps
3660 * @sched: the scheduler index
3662 * Configure a Tx HW scheduler for the target rate.
3664 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3666 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3667 unsigned int clk = adap->params.vpd.cclk * 1000;
3668 unsigned int selected_cpt = 0, selected_bpt = 0;
/* exhaustively search (clocks-per-tick, bytes-per-tick) pairs for the
 * combination whose achieved rate is closest to the target */
3671 kbps *= 125; /* -> bytes */
3672 for (cpt = 1; cpt <= 255; cpt++) {
/* NOTE(review): the tps computation from clk/cpt is elided in this excerpt */
3674 bpt = (kbps + tps / 2) / tps;
3675 if (bpt > 0 && bpt <= 255) {
3677 delta = v >= kbps ? v - kbps : kbps - v;
3678 if (delta < mindelta) {
/* once a candidate exists, larger cpt values can only be worse */
3683 } else if (selected_cpt)
/* two schedulers share each rate-limit register; pick the word by sched/2 */
3689 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3690 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3691 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* even schedulers occupy the low half-word, odd ones the high half-word */
3693 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3695 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3696 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3701 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3702 * @adap: the adapter
3703 * @sched: the scheduler index
3704 * @ipg: the interpacket delay in tenths of nanoseconds
3706 * Set the interpacket delay for a HW packet rate scheduler.
3708 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
/* two schedulers share each separator register; sched/2 selects the register */
3710 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3712 /* convert ipg to nearest number of core clocks */
3713 ipg *= core_ticks_per_usec(adap);
3714 ipg = (ipg + 5000) / 10000;
3718 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3719 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* even schedulers use the low 16 bits, odd ones the high 16 bits */
3721 v = (v & 0xffff) | (ipg << 16);
3723 v = (v & 0xffff0000) | ipg;
3724 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* read back to flush the posted write */
3725 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3730 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3731 * @adap: the adapter
3732 * @sched: the scheduler index
3733 * @kbps: the byte rate in Kbps
3734 * @ipg: the interpacket delay in tenths of nanoseconds
3736 * Return the current configuration of a HW Tx scheduler.
3738 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3741 unsigned int v, addr, bpt, cpt;
/* read the rate-limit word shared by this scheduler pair */
3744 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3745 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3746 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* bpt = bytes per tick, cpt = clocks per tick (cpt extraction elided here) */
3749 bpt = (v >> 8) & 0xff;
3752 *kbps = 0; /* scheduler disabled */
/* rate = (core clock / cpt) ticks/sec * bpt bytes/tick, converted to Kbps */
3754 v = (adap->params.vpd.cclk * 1000) / cpt;
3755 *kbps = (v * bpt) / 125;
/* now read the interpacket-gap word for the same scheduler pair */
3759 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3760 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3761 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* convert core clocks back to tenths of nanoseconds */
3765 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3770 * tp_init - configure TP
3771 * @adap: the adapter
3772 * @p: TP configuration parameters
3774 * Initializes the TP HW module.
3776 static int tp_init(adapter_t *adap, const struct tp_params *p)
/* accept VLAN-tagged frames on both ports without acceleration extraction */
3781 t3_set_vlan_accel(adap, 3, 0);
3783 if (is_offload(adap)) {
3784 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
/* kick free-list initialization and wait for it to complete */
3785 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3786 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3789 CH_ERR(adap, "TP initialization timed out\n");
/* finally take TP out of reset */
3793 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3798 * t3_mps_set_active_ports - configure port failover
3799 * @adap: the adapter
3800 * @port_mask: bitmap of active ports
3802 * Sets the active ports according to the supplied bitmap.
3804 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
/* reject bits beyond the ports this adapter actually has */
3806 if (port_mask & ~((1 << adap->params.nports) - 1))
3808 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3809 port_mask << S_PORT0ACTIVE);
3814 * chan_init_hw - channel-dependent HW initialization
3815 * @adap: the adapter
3816 * @chan_map: bitmap of Tx channels being used
3818 * Perform the bits of HW initialization that are dependent on the Tx
3819 * channels being used.
3821 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3825 if (chan_map != 3) { /* one channel */
/* single channel: disable round-robin arbitration between channels */
3826 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3827 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
/* enable TX only on the port actually in use */
3828 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3829 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3830 F_TPTXPORT1EN | F_PORT1ACTIVE));
3831 t3_write_reg(adap, A_PM1_TX_CFG,
3832 chan_map == 1 ? 0xffffffff : 0);
/* map all modulation queues to the single channel */
3834 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3835 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3836 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3837 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3838 } else { /* two channels */
/* both channels: round-robin between them with equal DMA weights */
3839 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3840 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3841 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3842 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3843 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3844 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3846 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3847 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
/* split the modulation queues evenly across the two channels */
3848 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3849 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3850 for (i = 0; i < 16; i++)
3851 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3852 (i << 16) | 0x1010);
3853 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3854 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
/* calibrate_xgm - run MAC impedance calibration (XAUI) or program fixed
 * RGMII impedance values; returns non-zero on calibration failure */
3858 static int calibrate_xgm(adapter_t *adapter)
3860 if (uses_xaui(adapter)) {
/* retry calibration a few times before giving up */
3863 for (i = 0; i < 5; ++i) {
3864 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* read back to flush the posted write before polling */
3865 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3867 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3868 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
/* calibration done cleanly: latch the measured impedance value */
3869 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3870 V_XAUIIMP(G_CALIMP(v) >> 2));
3874 CH_ERR(adapter, "MAC calibration failed\n");
/* non-XAUI (RGMII) path: fixed pull-down/pull-up impedance settings */
3877 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3878 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3879 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3880 F_XGM_IMPSETUPDATE);
/* calibrate_xgm_t3b - T3B-specific RGMII impedance calibration sequence:
 * reset the calibrator, then pulse IMPSETUPDATE and CALUPDATE */
3885 static void calibrate_xgm_t3b(adapter_t *adapter)
3887 if (!uses_xaui(adapter)) {
3888 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3889 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3890 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3891 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3892 F_XGM_IMPSETUPDATE);
3893 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3895 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3896 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* MC7 (external memory controller) timing parameters; values are programmed
 * into A_MC7_PARM by mc7_init(). Units are presumably memory-clock cycles —
 * TODO confirm against the MC7 register documentation. */
3900 struct mc7_timing_params {
3901 unsigned char ActToPreDly;
3902 unsigned char ActToRdWrDly;
3903 unsigned char PreCyc;
3904 unsigned char RefCyc[5]; /* refresh cycles, indexed by device density */
3905 unsigned char BkCyc;
3906 unsigned char WrToRdDly;
3907 unsigned char RdToWrDly;
3911 * Write a value to a register and check that the write completed. These
3912 * writes normally complete in a cycle or two, so one read should suffice.
3913 * The very first read exists to flush the posted write to the device.
3915 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3917 t3_write_reg(adapter, addr, val);
3918 (void) t3_read_reg(adapter, addr); /* flush */
/* success path: BUSY already clear after the flush read */
3919 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3921 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/* mc7_init - bring up one MC7 external memory interface: calibrate, program
 * timing, run the DRAM init sequence, set up refresh and ECC, then BIST-clear
 * the memory. Returns 0 on success, negative on failure. */
3925 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* DRAM mode-register values, indexed by mem_type */
3927 static const unsigned int mc7_mode[] = {
3928 0x632, 0x642, 0x652, 0x432, 0x442
/* timing sets, one entry per mem_type (same indexing as mc7_mode) */
3930 static const struct mc7_timing_params mc7_timings[] = {
3931 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3932 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3933 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3934 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3935 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3939 unsigned int width, density, slow, attempts;
3940 adapter_t *adapter = mc7->adapter;
3941 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* pick up the strap-derived geometry from the config register */
3946 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3947 slow = val & F_SLOW;
3948 width = G_WIDTH(val);
3949 density = G_DEN(val);
3951 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3952 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* run a single calibration pass and check it finished without fault */
3956 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3957 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3959 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3960 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3961 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* program the per-memory-type timing parameters */
3967 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3968 V_ACTTOPREDLY(p->ActToPreDly) |
3969 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3970 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3971 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3973 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3974 val | F_CLKEN | F_TERM150);
3975 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3978 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DRAM init: precharge, load extended mode registers (wrreg_wait reports
 * failures; the error-path lines are elided in this excerpt) */
3983 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3984 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3985 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3986 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3990 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3991 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
/* precharge again, two refreshes, then load the mode register proper */
3996 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3997 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3998 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3999 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4000 mc7_mode[mem_type]) ||
4001 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4002 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4005 /* clock value is in KHz */
4006 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
4007 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
/* enable periodic refresh with a divider derived from the memory clock */
4009 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4010 F_PERREFEN | V_PREREFDIV(mc7_clock));
4011 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4013 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4014 F_ECCGENEN | F_ECCCHKEN);
/* BIST write of zeros over the whole part initializes memory and ECC */
4015 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4016 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4017 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4018 (mc7->size << width) - 1);
4019 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4020 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
/* poll until the BIST engine goes idle or we run out of attempts */
4025 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4026 } while ((val & F_BUSY) && --attempts);
4028 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4032 /* Enable normal memory accesses. */
4033 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/* config_pcie - tune PCIe ACK latency and replay timers from the negotiated
 * link width and payload size, clear stale PCIe errors, and enable link-down
 * reset handling */
4040 static void config_pcie(adapter_t *adap)
/* ACK latency table, indexed [log2(link width)][payload size code] */
4042 static const u16 ack_lat[4][6] = {
4043 { 237, 416, 559, 1071, 2095, 4143 },
4044 { 128, 217, 289, 545, 1057, 2081 },
4045 { 73, 118, 154, 282, 538, 1050 },
4046 { 67, 107, 86, 150, 278, 534 }
/* replay timer table, same indexing as ack_lat */
4048 static const u16 rpl_tmr[4][6] = {
4049 { 711, 1248, 1677, 3213, 6285, 12429 },
4050 { 384, 651, 867, 1635, 3171, 6243 },
4051 { 219, 354, 462, 846, 1614, 3150 },
4052 { 201, 321, 258, 450, 834, 1602 }
4056 unsigned int log2_width, pldsize;
4057 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* read Max_Payload_Size from the PCIe Device Control register */
4059 t3_os_pci_read_config_2(adap,
4060 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4062 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4065 * Gen2 adapter pcie bridge compatibility requires minimum
4066 * Max_Read_Request_size
4068 t3_os_pci_read_config_2(adap, 0x2, &devid);
4069 if (devid == 0x37) {
/* force minimum read-request and payload sizes on the Gen2 (0x37) device */
4070 t3_os_pci_write_config_2(adap,
4071 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4072 val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4076 t3_os_pci_read_config_2(adap,
4077 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* fast-training sequence counts; rev-0 silicon lacks the RX count register */
4080 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4081 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4082 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4083 log2_width = fls(adap->params.pci.width) - 1;
4084 acklat = ack_lat[log2_width][pldsize];
4085 if (val & 1) /* check LOsEnable */
4086 acklat += fst_trn_tx * 4;
4087 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* rev 0 uses a different field layout for the ACK latency */
4089 if (adap->params.rev == 0)
4090 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4091 V_T3A_ACKLAT(M_T3A_ACKLAT),
4092 V_T3A_ACKLAT(acklat));
4094 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4097 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4098 V_REPLAYLMT(rpllmt));
/* clear any stale PCIe errors before enabling error/reset handling */
4100 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4101 t3_set_reg_field(adap, A_PCIE_CFG, 0,
4102 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4103 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4107 * t3_init_hw - initialize and configure T3 HW modules
4108 * @adapter: the adapter
4109 * @fw_params: initial parameters to pass to firmware (optional)
4111 * Initialize and configure T3 HW modules. This performs the
4112 * initialization steps that need to be done once after a card is reset.
4113 * MAC and PHY initialization is handled separately whenever a port is
4116 * @fw_params are passed to FW and their value is platform dependent.
4117 * Only the top 8 bits are available for use, the rest must be 0.
4119 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4121 int err = -EIO, attempts, i;
4122 const struct vpd_params *vpd = &adapter->params.vpd;
/* pick the calibration routine by silicon revision */
4124 if (adapter->params.rev > 0)
4125 calibrate_xgm_t3b(adapter);
4126 else if (calibrate_xgm(adapter))
4129 if (adapter->params.nports > 2)
4130 t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4133 partition_mem(adapter, &adapter->params.tp);
/* bring up the three external memories and the MC5 lookup engine */
4135 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4136 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4137 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4138 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4139 adapter->params.mc5.nfilters,
4140 adapter->params.mc5.nroutes))
/* clear the first 32 SGE CQ contexts */
4143 for (i = 0; i < 32; i++)
4144 if (clear_sge_ctxt(adapter, i, F_CQ))
4148 if (tp_init(adapter, &adapter->params.tp))
4151 t3_tp_set_coalescing_size(adapter,
4152 min(adapter->params.sge.max_pkt_size,
4153 MAX_RX_COALESCING_LEN), 1);
4154 t3_tp_set_max_rxsize(adapter,
4155 min(adapter->params.sge.max_pkt_size, 16384U));
4156 ulp_config(adapter, &adapter->params.tp);
/* bus-specific configuration: PCIe gets its own tuning, PCI-X a field set */
4157 if (is_pcie(adapter))
4158 config_pcie(adapter);
4160 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4161 F_DMASTOPEN | F_CLIDECEN);
4163 if (adapter->params.rev == T3_REV_C)
4164 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4165 F_CFG_CQE_SOP_MASK);
4167 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4168 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4169 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4170 chan_init_hw(adapter, adapter->params.chan_map);
4171 t3_sge_init(adapter, &adapter->params.sge);
4173 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
/* hand fw_params to the microprocessor and point it at the flash boot image */
4175 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4176 t3_write_reg(adapter, A_CIM_BOOT_CFG,
4177 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4178 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
4181 do { /* wait for uP to initialize */
4183 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4185 CH_ERR(adapter, "uP initialization timed out\n");
4195 * get_pci_mode - determine a card's PCI mode
4196 * @adapter: the adapter
4197 * @p: where to store the PCI settings
4199 * Determines a card's PCI mode and associated parameters, such as speed
4202 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
/* PCI-X clock range code -> bus speed in MHz */
4204 static unsigned short speed_map[] = { 33, 66, 100, 133 };
4205 u32 pci_mode, pcie_cap;
4207 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* PCIe: record the capability offset and negotiated link width */
4211 p->variant = PCI_VARIANT_PCIE;
4212 p->pcie_cap_addr = pcie_cap;
4213 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4215 p->width = (val >> 4) & 0x3f;
/* otherwise classify the PCI/PCI-X mode from the adapter's mode register */
4219 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4220 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4221 p->width = (pci_mode & F_64BIT) ? 64 : 32;
4222 pci_mode = G_PCIXINITPAT(pci_mode);
4224 p->variant = PCI_VARIANT_PCI;
4225 else if (pci_mode < 4)
4226 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4227 else if (pci_mode < 8)
4228 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4230 p->variant = PCI_VARIANT_PCIX_266_MODE2;
4234 * init_link_config - initialize a link's SW state
4235 * @lc: structure holding the link state
4236 * @caps: link capabilities
4238 * Initializes the SW state maintained for each link, including the link's
4239 * capabilities and default speed/duplex/flow-control/autonegotiation
4242 static void __devinit init_link_config(struct link_config *lc,
4245 lc->supported = caps;
/* speed/duplex unknown until the link comes up */
4246 lc->requested_speed = lc->speed = SPEED_INVALID;
4247 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4248 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4249 if (lc->supported & SUPPORTED_Autoneg) {
/* autoneg-capable: advertise everything and let pause be negotiated */
4250 lc->advertising = lc->supported;
4251 lc->autoneg = AUTONEG_ENABLE;
4252 lc->requested_fc |= PAUSE_AUTONEG;
4254 lc->advertising = 0;
4255 lc->autoneg = AUTONEG_DISABLE;
4260 * mc7_calc_size - calculate MC7 memory size
4261 * @cfg: the MC7 configuration
4263 * Calculates the size of an MC7 memory in bytes from the value of its
4264 * configuration register.
4266 static unsigned int __devinit mc7_calc_size(u32 cfg)
4268 unsigned int width = G_WIDTH(cfg);
/* F_BKS / F_ORG are single-bit flags: presence doubles banks / organization */
4269 unsigned int banks = !!(cfg & F_BKS) + 1;
4270 unsigned int org = !!(cfg & F_ORG) + 1;
4271 unsigned int density = G_DEN(cfg);
/* base 256 Mb scaled by density, banks, organization, and bus width */
4272 unsigned int MBs = ((256 << density) * banks) / (org << width);
/* mc7_prep - initialize the SW state for one MC7 instance: bind it to the
 * adapter, compute its register offset, and derive its size from the strap
 * configuration (size 0 means the memory is absent) */
4277 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4278 unsigned int base_addr, const char *name)
4282 mc7->adapter = adapter;
/* all MC7 register blocks are addressed relative to the PMRX block */
4284 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4285 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4286 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4287 mc7->width = G_WIDTH(cfg);
/* mac_prep - initialize the SW state for one MAC and apply early rev-0 XAUI
 * SERDES fixups */
4290 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4294 mac->adapter = adapter;
4295 mac->multiport = adapter->params.nports > 2;
4296 if (mac->multiport) {
4297 mac->ext_port = (unsigned char)index;
4302 /* Gen2 adapter uses VPD xauicfg[] to notify driver which MAC
4303 is connected to each port; it is supposed to be using xgmac0 for both ports
4305 t3_os_pci_read_config_2(adapter, 0x2, &devid);
4307 if (mac->multiport ||
4308 (!adapter->params.vpd.xauicfg[1] && (devid==0x37)))
/* otherwise each port gets its own XGMAC register block */
4311 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4313 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
/* rev-0 XAUI SERDES control value differs for 10G vs 1G */
4314 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4315 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4316 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4322 * early_hw_init - HW initialization done at card detection time
4323 * @adapter: the adapter
4324 * @ai: contains information about the adapter type and properties
4326 * Performs the part of HW initialization that is done early on when the
4327 * driver first detects the card. Most of the HW state is initialized
4328 * lazily later on when a port or an offload function are first used.
4330 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4332 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4334 u32 gpio_out = ai->gpio_out;
4336 mi1_init(adapter, ai);
4337 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
4338 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4339 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4340 gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4341 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4342 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4344 if (adapter->params.rev == 0 || !uses_xaui(adapter))
4347 /* Enable MAC clocks so we can access the registers */
4348 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4349 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
/* pulse the clock-divider reset on both XGMAC instances */
4351 val |= F_CLKDIVRESET_;
4352 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4353 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4354 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4355 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4359 * t3_reset_adapter - reset the adapter
4360 * @adapter: the adapter
4362 * Reset the adapter.
4364 int t3_reset_adapter(adapter_t *adapter)
/* pre-B2 PCIe parts lose PCI config state across a warm reset */
4366 int i, save_and_restore_pcie =
4367 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4370 if (save_and_restore_pcie)
4371 t3_os_pci_save_state(adapter);
4372 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4375 * Delay. Give Some time to device to reset fully.
4376 * XXX The delay time should be modified.
/* poll until the device answers config reads with Chelsio's vendor ID */
4378 for (i = 0; i < 10; i++) {
4380 t3_os_pci_read_config_2(adapter, 0x00, &devid);
4381 if (devid == 0x1425)
4385 if (devid != 0x1425)
4388 if (save_and_restore_pcie)
4389 t3_os_pci_restore_state(adapter);
/* init_parity - initialize RAMs that need valid parity/ECC before use:
 * clear SGE egress and response-queue contexts and zero the CIM IBQs */
4393 static int init_parity(adapter_t *adap)
4397 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* clear the low egress contexts, the top-of-range egress contexts,
 * and one response-queue context per queue set */
4400 for (err = i = 0; !err && i < 16; i++)
4401 err = clear_sge_ctxt(adap, i, F_EGRESS);
4402 for (i = 0xfff0; !err && i <= 0xffff; i++)
4403 err = clear_sge_ctxt(adap, i, F_EGRESS);
4404 for (i = 0; !err && i < SGE_QSETS; i++)
4405 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
/* write zeros through the IBQ debug interface to every address of all 4 queues */
4409 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4410 for (i = 0; i < 4; i++)
4411 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4412 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4413 F_IBQDBGWR | V_IBQDBGQID(i) |
4414 V_IBQDBGADDR(addr));
4415 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4416 F_IBQDBGBUSY, 0, 2, 1);
/*
 * NOTE(review): extraction artifact — each code line below carries a fused
 * original line number, and gaps in those numbers show that lines were lost
 * (braces, local declarations such as 'ret' and 'hw_addr', blank lines, and
 * most error-check/return statements).  Code text is kept byte-identical;
 * only comments are added.  Reconcile against the upstream driver source.
 */
4424 * t3_prep_adapter - prepare SW and HW for operation
4425 * @adapter: the adapter
4426 * @ai: contains information about the adapter type and properties
4428 * Initialize adapter SW state for the various HW modules, set initial
4429 * values for some adapter tunables, take PHYs out of reset, and
4430 * initialize the MDIO interface.
4432 int __devinit t3_prep_adapter(adapter_t *adapter,
4433 const struct adapter_info *ai, int reset)
/* Locals: 'ret' is declared in a missing line; 'i'/'j' below. */
4436 unsigned int i, j = 0;
4438 get_pci_mode(adapter, &adapter->params.pci);
/* Record adapter identity: info table, port count, channel map, HW rev. */
4440 adapter->params.info = ai;
4441 adapter->params.nports = ai->nports0 + ai->nports1;
4442 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4443 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4446 * We used to only run the "adapter check task" once a second if
4447 * we had PHYs which didn't support interrupts (we would check
4448 * their link status once a second). Now we check other conditions
4449 * in that routine which would [potentially] impose a very high
4450 * interrupt load on the system. As such, we now always scan the
4451 * adapter state once a second ...
4453 adapter->params.linkpoll_period = 10;
/* >2 ports means an external VSC switch chip handles the extra MACs. */
4455 if (adapter->params.nports > 2)
4456 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
/* (else-branch; the 'else' keyword line was lost in extraction) */
4458 adapter->params.stats_update_period = is_10G(adapter) ?
4459 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4460 adapter->params.pci.vpd_cap_addr =
4461 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4463 ret = get_vpd_params(adapter, &adapter->params.vpd);
/* Missing lines here presumably check 'ret' and bail out — confirm. */
4467 if (reset && t3_reset_adapter(adapter))
/* Missing line: the failure return for a reset that did not complete. */
/* Memory-controller setup only applies when VPD reports a memory clock,
 * i.e. the card actually has MC7 memory attached. */
4470 if (adapter->params.vpd.mclk) {
4471 struct tp_params *p = &adapter->params.tp;
4473 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4474 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4475 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4477 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4478 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4479 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4480 p->cm_size = t3_mc7_size(&adapter->cm);
4481 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
4482 p->chan_tx_size = p->pmtx_size / p->nchan;
4483 p->rx_pg_size = 64 * 1024;
4484 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4485 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4486 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* 12 timer queues when CM is >=128MB or rev > 0, else 6. */
4487 p->ntimer_qs = p->cm_size >= (128 << 20) ||
4488 adapter->params.rev > 0 ? 12 : 6;
/* TP timer resolution derived from core clock (continuation lost). */
4489 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4491 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
/* Offload (TOE) is usable only if all three MC7 memories are present. */
4494 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4495 t3_mc7_size(&adapter->pmtx) &&
4496 t3_mc7_size(&adapter->cm);
4498 t3_sge_prep(adapter, &adapter->params.sge);
4500 if (is_offload(adapter)) {
4501 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4502 /* PR 6487. TOE and filtering are mutually exclusive */
4503 adapter->params.mc5.nfilters = 0;
4504 adapter->params.mc5.nroutes = 0;
4505 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4507 init_mtus(adapter->params.mtus);
4508 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4511 early_hw_init(adapter, ai);
4512 ret = init_parity(adapter);
/* Missing lines presumably propagate a non-zero 'ret' — confirm. */
4516 if (adapter->params.nports > 2 &&
4517 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
/* Missing line: the error return for a failed VSC7323 init. */
/* Per-port setup: bind PHY ops and MAC, derive the port MAC address. */
4520 for_each_port(adapter, i) {
/* 'u8 hw_addr[6];' declaration lost in extraction — see memcpy below. */
4522 const struct port_type_info *pti;
4523 struct port_info *p = adap2pinfo(adapter, i);
/* Scan VPD port_type[] with index j for this port's entry; the loop
 * scaffolding around these lines was lost in extraction — confirm. */
4526 unsigned port_type = adapter->params.vpd.port_type[j];
4528 if (port_type < ARRAY_SIZE(port_types)) {
4529 pti = &port_types[port_type];
4535 if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
/* Missing line: presumably an error return when no entry is found. */
4538 ret = pti->phy_prep(p, ai->phy_base_addr + j,
4542 mac_prep(&p->mac, adapter, j);
4546 * The VPD EEPROM stores the base Ethernet address for the
4547 * card. A port's address is derived from the base by adding
4548 * the port's index to the base's low octet.
4550 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4551 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4553 t3_os_set_hw_addr(adapter, i, hw_addr);
4554 init_link_config(&p->link_config, p->phy.caps);
/* Leave the PHY powered down until an interface is brought up. */
4555 p->phy.ops->power_down(&p->phy, 1);
4558 * If the PHY doesn't support interrupts for link status
4559 * changes, schedule a scan of the adapter links at least
4562 if (!(p->phy.caps & SUPPORTED_IRQ) &&
4563 adapter->params.linkpoll_period > 10)
4564 adapter->params.linkpoll_period = 10;
/*
 * NOTE(review): extraction artifact — code lines carry fused original line
 * numbers and gaps in those numbers mark lost lines (braces, declarations
 * of i/j/ret, error returns, loop scaffolding).  Code text is kept
 * byte-identical; only comments are added.
 */
4571 * t3_reinit_adapter - prepare HW for operation again
4572 * @adapter: the adapter
4574 * Put HW in the same state as @t3_prep_adapter without any changes to
4575 * SW state. This is a cut down version of @t3_prep_adapter intended
4576 * to be used after events that wipe out HW state but preserve SW state,
4577 * e.g., EEH. The device must be reset before calling this.
4579 int t3_reinit_adapter(adapter_t *adap)
/* Local declarations (i, j, ret) were lost in extraction — confirm. */
4584 early_hw_init(adap, adap->params.info);
4585 ret = init_parity(adap);
/* Missing lines presumably propagate a non-zero 'ret' — confirm. */
4589 if (adap->params.nports > 2 &&
4590 (ret = t3_vsc7323_init(adap, adap->params.nports)))
/* Missing line: error return for a failed VSC7323 re-init. */
4593 for_each_port(adap, i) {
4594 const struct port_type_info *pti;
4595 struct port_info *p = adap2pinfo(adap, i);
/* Port-type scan mirrors t3_prep_adapter: advance j through the VPD
 * port_type[] table; the loop scaffolding was lost in extraction. */
4598 unsigned port_type = adap->params.vpd.port_type[j];
4600 if (port_type < ARRAY_SIZE(port_types)) {
4601 pti = &port_types[port_type];
4607 if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
/* Missing line: presumably an error return when no entry is found. */
/* Re-prepare the PHY at its already-known address; the NULL third
 * argument presumably reuses the existing MDIO ops (SW state kept) —
 * confirm against phy_prep's contract. */
4610 ret = pti->phy_prep(p, p->phy.addr, NULL);
/* Leave the PHY powered down until an interface is brought up. */
4613 p->phy.ops->power_down(&p->phy, 1);
4618 void t3_led_ready(adapter_t *adapter)
4620 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4624 void t3_port_failover(adapter_t *adapter, int port)
4628 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4629 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4633 void t3_failover_done(adapter_t *adapter, int port)
4635 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4636 F_PORT0ACTIVE | F_PORT1ACTIVE);
4639 void t3_failover_clear(adapter_t *adapter)
4641 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4642 F_PORT0ACTIVE | F_PORT1ACTIVE);
4645 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4649 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4650 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4651 F_HOSTBUSY, 0, 10, 10, &v))
4654 *val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4659 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4663 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4665 addr |= F_HOSTWRITE;
4666 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4668 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4669 F_HOSTBUSY, 0, 10, 5, &v))
/*
 * NOTE(review): extraction artifact — code lines carry fused original line
 * numbers and the gaps mark lost lines (locals such as the data pointer
 * and the 'cnt' poll counter, error-handling/goto lines, the delay inside
 * the poll loop, and the final return).  Code text is kept byte-identical;
 * only comments are added.
 *
 * Dumps the CIM microprocessor logic-analyzer (LA) capture: reports
 * whether the LA is stopped, copies LA_ENTRIES words of trace data into
 * @data, and returns the write index and buffer size.  The buffer must
 * hold at least LA_ENTRIES * 4 bytes.
 */
4674 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4675 u32 *size, void *data)
/* Reject buffers too small for a full LA dump. */
4680 if (*size < LA_ENTRIES * 4)
4683 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* LA_CTRL bit 0 is the run bit; report stopped = !running. */
4687 *stopped = !(v & 1);
/* Freeze the LA (write 0 to LA_CTRL) before reading entries — the
 * surrounding 'if (!*stopped)' guard was presumably lost; confirm. */
4691 ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4696 for (i = 0; i < LA_ENTRIES; i++) {
/* Select entry i (bits 2+) and set the read-request bit (bit 1). */
4697 v = (i << 2) | (1 << 1);
4698 ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4702 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* Poll until the read-request bit self-clears; 'cnt' is a bounded
 * retry counter whose declaration/initialization was lost — confirm. */
4707 while ((v & (1 << 1)) && cnt) {
4710 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* Fetch the captured word; the store into the output buffer was lost. */
4718 ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4725 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
/* Write pointer lives in LA_CTRL[31:16]; +4 skips presumably-stale
 * entries just ahead of it — confirm against hardware docs. */
4729 *index = (v >> 16) + 4;
4730 *size = LA_ENTRIES * 4;
/* Unconditionally restart the LA (run bit set) before returning. */
4733 t3_cim_hac_write(adapter, LA_CTRL, 1);
4737 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4742 if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4745 for (i = 0; i < 4; i++) {
4746 ret = t3_cim_hac_read(adapter, (4 * i), &v);
4753 for (i = 0; i < IOQ_ENTRIES; i++) {
4754 u32 base_addr = 0x10 * (i + 1);
4756 for (j = 0; j < 4; j++) {
4757 ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4765 *size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);