1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <cxgb_include.h>
37 #include <dev/cxgb/cxgb_include.h>
41 #define msleep t3_os_sleep
/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
	u32 val = t3_read_reg(adapter, reg);

	/* !! normalizes (val & mask) to 0/1 so it can be compared against
	 * the requested polarity directly. */
	if (!!(val & mask) == polarity) {
/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
	/* Each entry's address is biased by @offset before the write. */
	t3_write_reg(adapter, p->reg_addr + offset, p->val);
/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value (read-modify-write).
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
	u32 v = t3_read_reg(adapter, addr) & ~mask;	/* clear the field */

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr); /* flush */
/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals, unsigned int nregs,
			     unsigned int start_idx)
	/* Select the index, then fetch the value it exposes. */
	t3_write_reg(adap, addr_reg, start_idx);
	*vals++ = t3_read_reg(adap, data_reg);
/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
	/* Per-width lane shift/step tables, indexed by mc7->width. */
	static int shift[] = { 0, 0, 16, 24 };
	static int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	/* Reject requests that fall outside the memory. */
	if (start >= size64 || start + n > size64)

	/* Convert the 64-bit-word index into a byte address for this width. */
	start *= (8 << mc7->width);
	/* Assemble each 64-bit word from (1 << width) narrower backdoor
	 * reads, highest lane first. */
	for (i = (1 << mc7->width) - 1; i >= 0; --i) {
		t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
		t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
		val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
		/* Poll until the backdoor operation completes. */
		while ((val & F_BUSY) && attempts--)
			val = t3_read_reg(adap,
					  mc7->offset + A_MC7_BD_OP);

		val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
		if (mc7->width == 0) {
			val64 = t3_read_reg(adap,
					    mc7->offset + A_MC7_BD_DATA0);
			val64 |= (u64)val << 32;
		/* Narrower configs: extract this lane and merge it in. */
		val >>= shift[mc7->width];
		val64 |= (u64)val << (step[mc7->width] * i);
/* Initialize the MI1 (MDIO) management interface. */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
	/* Divide the core clock down to the VPD-configured MDIO clock. */
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
	t3_write_reg(adap, A_MI1_CFG, val);
#define MDIO_ATTEMPTS 20	/* max polls for an MDIO op to finish */

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
	/* NOTE(review): mmd_addr appears unused here; direct (clause-22
	 * style) access addresses the register directly — confirm against
	 * the elided lines. */
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* start read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
/* MI1 write for direct-addressed PHYs. */
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* start write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);

/* MDIO operation table for direct-addressed PHYs. */
static struct mdio_ops mi1_mdio_ops = {
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
	/* Indirect (clause-45 style) access: the MMD goes in the address
	 * register and the target register number is loaded through the
	 * data register in a separate address cycle. */
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/* read cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
	*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
/* MI1 write for indirect-addressed PHYs: address cycle, then write cycle. */
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
	MDIO_UNLOCK(adapter);

/* MDIO operation table for indirect-addressed PHYs. */
static struct mdio_ops mi1_mdio_ext_ops = {
/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
	/* Read-modify-write helper built on the PHY's mdio ops. */
	ret = mdio_read(phy, mmd, reg, &val);
	ret = mdio_write(phy, mmd, reg, val | set);
/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * otherwise.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
	/* Set the self-clearing reset bit (and clear power-down). */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);

	err = mdio_read(phy, mmd, MII_BMCR, &ctl);
	/* Loop until the reset bit drops or the wait budget expires;
	 * NOTE(review): ctl is presumably masked to BMCR_RESET in an
	 * elided line above — confirm. */
	} while (ctl && --wait);
/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
	unsigned int val = 0;

	/* Gigabit abilities live in MII_CTRL1000; touch only those bits. */
	err = mdio_read(phy, 0, MII_CTRL1000, &val);

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;
	err = mdio_write(phy, 0, MII_CTRL1000, val);

	/* 10/100 and pause abilities go through MII_ADVERTISE. */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
	err = mdio_read(phy, 0, MII_BMCR, &ctl);

	/* Clear the BMCR speed bits, then set them per the request. */
	ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
	if (speed == SPEED_100)
		ctl |= BMCR_SPEED100;
	else if (speed == SPEED_1000)
		ctl |= BMCR_SPEED1000;

	ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
	if (duplex == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
/* Static per-model adapter properties: GPIO setup, link caps, MDIO ops. */
static struct adapter_info t3_adap_info[] = {
	F_GPIO2_OEN | F_GPIO4_OEN |
	F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	&mi1_mdio_ops, "Chelsio PE9000" },
	F_GPIO2_OEN | F_GPIO4_OEN |
	F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	&mi1_mdio_ops, "Chelsio T302" },
	F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	&mi1_mdio_ext_ops, "Chelsio T310" },
	F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	&mi1_mdio_ext_ops, "Chelsio T320" },
	F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
	&mi1_mdio_ops, "Chelsio T304" },
471 * Return the adapter_info structure with a given index. Out-of-range indices
474 const struct adapter_info *t3_get_adapter_info(unsigned int id)
476 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/* PHY prep handler, capability mask, and description for each port type. */
static struct port_type_info port_types[] = {
	{ t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	{ t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	  "10/100/1000BASE-T" },
	{ t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	  "10/100/1000BASE-T" },
	{ t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
	{ NULL, CAPS_10G, "10GBASE-KX4" },
	{ t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
	{ t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	{ NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
/* Expands to the three fields of one VPD keyword entry: 2-byte keyword,
 * length byte, and the data itself. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; char name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * the fields this driver consumes.
 */
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum (VPD "RV" keyword) */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
#define EEPROM_MAX_POLL 4	/* max polls of the VPD address-flag bit */
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Only 4-byte-aligned addresses inside the EEPROM (or the status
	 * word) are valid. */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	/* Flag still clear: the VPD read never completed. */
	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);	/* VPD data is little-endian */
/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
	/* Setting PCI_VPD_ADDR_F requests a write; hardware clears the
	 * flag when the write has completed. */
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
608 * t3_seeprom_wp - enable/disable EEPROM write protection
609 * @adapter: the adapter
610 * @enable: 1 to enable write protection, 0 to disable it
612 * Enables or disables write protection on the serial EEPROM.
614 int t3_seeprom_wp(adapter_t *adapter, int enable)
616 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  Accepts upper-
 * and lower-case digits; input must be a valid hex digit.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it elsewhere; probe VPD_BASE first.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	/* 0x82 is the VPD identifier-string resource tag; its presence
	 * tells us which base address this card uses. */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));

	/* Clock/timing fields are stored as decimal ASCII strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);

	/* The MAC base address is stored as 12 hex digits. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes (standard SPI flash command set) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
	if (!byte_cnt || byte_cnt > 4)
	/* Refuse to start a new op while the SF interface is busy. */
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	*valp = t3_read_reg(adapter, A_SF_DATA);
/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
	if (!byte_cnt || byte_cnt > 4)
	/* Refuse to start a new op while the SF interface is busy. */
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
	/* Issue RDSR and fetch one status byte per poll iteration. */
	if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
	    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * native order.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))

	/* Build the FAST_READ command word: opcode in the low byte, the
	 * byte-swapped address in the remaining bytes. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		*data = htonl(*data);	/* preserve flash byte order */
/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must stay inside the flash and within one 256B page. */
	if (addr + n > SF_SIZE || offset + n > 256)

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)

	/* Stream the payload up to 4 bytes at a time, big-endian. */
	for (left = n; left; left -= c) {
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);

	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);

	if (memcmp(data - n, (u8 *)buf + offset, n))
/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
/**
 * t3_check_tpsram_version - read the tp sram version
 * @adapter: the adapter
 *
 * Reads the loaded TP SRAM version and checks it against the version
 * this driver was built for.
 */
int t3_check_tpsram_version(adapter_t *adapter)
	unsigned int major, minor;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,

	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	/* Only an exact major.minor match is accepted. */
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)

	CH_WARN(adapter, "found wrong TP version (%u.%u), "
		"driver needs version %d.%d\n", major, minor,
		TP_VERSION_MAJOR, TP_VERSION_MINOR);
/**
 * t3_check_tpsram - check if provided protocol SRAM
 *	is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: size of the image in bytes
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum: a valid image's 32-bit sum is all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Identifies which firmware flavor a version word describes. */
enum fw_version_type {

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	/* Type and exact major.minor must all match. */
	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)

	CH_WARN(adapter, "found wrong FW version (%u.%u), "
		"driver needs version %d.%d\n", major, minor,
		FW_VERSION_MAJOR, FW_VERSION_MINOR);
/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
	while (start <= end) {
		/* Write-enable must precede each sector-erase command; the
		 * sector number rides in the command's address bytes. */
		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: size of the image in bytes
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)

	/* Validate the image checksum before touching the flash. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);

	size -= 8; /* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);	/* one page */

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);

		fw_data += chunk_size;

	/* NOTE(review): the version word is written last — presumably so an
	 * interrupted download leaves a stale version; confirm. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
	CH_ERR(adapter, "firmware download failed, error %d\n", ret);
#define CIM_CTL_BASE 0x2000	/* base of the CIM control region */

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
	/* Refuse to start while a host access is already in flight. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
		*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(adapter_t *adapter, int port_id)
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* On rev > 0 XAUI-based cards, gate the XGMAC XAUI lanes on link
	 * state transitions. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	/* Honor autoneg-resolved pause only when the user asked for it. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = (unsigned char)fc;

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (lc->supported & SUPPORTED_Autoneg) {
		/* Translate requested pause flags into advertisement bits. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		lc->advertising |= ADVERTISED_Asym_Pause;
		lc->advertising |= ADVERTISED_Pause;
	phy->ops->advertise(phy, lc->advertising);

	if (lc->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: program MAC and PHY now. */
		lc->speed = lc->requested_speed;
		lc->duplex = lc->requested_duplex;
		lc->fc = (unsigned char)fc;
		t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
		/* Also disables autoneg */
		phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		phy->ops->reset(phy, 0);
		phy->ops->autoneg_enable(phy);
	t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
	lc->fc = (unsigned char)fc;
	phy->ops->reset(phy, 0);
1190 * t3_set_vlan_accel - control HW VLAN extraction
1191 * @adapter: the adapter
1192 * @ports: bitmap of adapter ports to operate on
1193 * @on: enable (1) or disable (0) HW VLAN extraction
1195 * Enables or disables HW extraction of VLAN tags for the given port.
1197 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1199 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1200 ports << S_VLANEXTRACTIONENABLE,
1201 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
				 const struct intr_info *acts,
				 unsigned long *stats)
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask)) continue;
		/* Fatal conditions alert; non-fatal ones just warn. */
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	if (status) /* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt-enable masks: the interrupt conditions the driver
 * cares about for each HW block.  Commented-out terms are conditions that
 * are deliberately not enabled.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1294 * Interrupt handler for the PCIX1 module.
1296 static void pci_intr_handler(adapter_t *adapter)
1298 static struct intr_info pcix1_intr_info[] = {
1299 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1300 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1301 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1302 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1303 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1304 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1305 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1306 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1307 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1309 { F_DETCORECCERR, "PCI correctable ECC error",
1310 STAT_PCI_CORR_ECC, 0 },
1311 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1312 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1313 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1315 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1317 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1319 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1324 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1325 pcix1_intr_info, adapter->irq_stats))
1326 t3_fatal_err(adapter);
1330 * Interrupt handler for the PCIE module.
1332 static void pcie_intr_handler(adapter_t *adapter)
1334 static struct intr_info pcie_intr_info[] = {
1335 { F_PEXERR, "PCI PEX error", -1, 1 },
1337 "PCI unexpected split completion DMA read error", -1, 1 },
1339 "PCI unexpected split completion DMA command error", -1, 1 },
1340 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1341 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1342 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1343 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1344 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1345 "PCI MSI-X table/PBA parity error", -1, 1 },
1346 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1350 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1351 CH_ALERT(adapter, "PEX error code 0x%x\n",
1352 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1354 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1355 pcie_intr_info, adapter->irq_stats))
1356 t3_fatal_err(adapter);
1360 * TP interrupt handler.
1362 static void tp_intr_handler(adapter_t *adapter)
1364 static struct intr_info tp_intr_info[] = {
1365 { 0xffffff, "TP parity error", -1, 1 },
1366 { 0x1000000, "TP out of Rx pages", -1, 1 },
1367 { 0x2000000, "TP out of Tx pages", -1, 1 },
1371 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1372 tp_intr_info, NULL))
1373 t3_fatal_err(adapter);
1377 * CIM interrupt handler.
1379 static void cim_intr_handler(adapter_t *adapter)
1381 static struct intr_info cim_intr_info[] = {
1382 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1383 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1384 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1385 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1386 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1387 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1388 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1389 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1390 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1391 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1392 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1393 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1397 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1398 cim_intr_info, NULL))
1399 t3_fatal_err(adapter);
1403 * ULP RX interrupt handler.
1405 static void ulprx_intr_handler(adapter_t *adapter)
1407 static struct intr_info ulprx_intr_info[] = {
1408 { F_PARERR, "ULP RX parity error", -1, 1 },
1412 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1413 ulprx_intr_info, NULL))
1414 t3_fatal_err(adapter);
1418 * ULP TX interrupt handler.
1420 static void ulptx_intr_handler(adapter_t *adapter)
1422 static struct intr_info ulptx_intr_info[] = {
1423 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1424 STAT_ULP_CH0_PBL_OOB, 0 },
1425 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1426 STAT_ULP_CH1_PBL_OOB, 0 },
1430 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1431 ulptx_intr_info, adapter->irq_stats))
1432 t3_fatal_err(adapter);
/* Aggregate PM TX SPI framing-error bits used by PMTX_INTR_MASK and the
 * PM TX interrupt table. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1445 * PM TX interrupt handler.
1447 static void pmtx_intr_handler(adapter_t *adapter)
1449 static struct intr_info pmtx_intr_info[] = {
1450 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1451 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1452 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1453 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1454 "PMTX ispi parity error", -1, 1 },
1455 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1456 "PMTX ospi parity error", -1, 1 },
1460 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1461 pmtx_intr_info, NULL))
1462 t3_fatal_err(adapter);
/* Aggregate PM RX SPI framing-error bits used by PMRX_INTR_MASK and the
 * PM RX interrupt table. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1475 * PM RX interrupt handler.
1477 static void pmrx_intr_handler(adapter_t *adapter)
1479 static struct intr_info pmrx_intr_info[] = {
1480 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1481 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1482 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1483 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1484 "PMRX ispi parity error", -1, 1 },
1485 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1486 "PMRX ospi parity error", -1, 1 },
1490 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1491 pmrx_intr_info, NULL))
1492 t3_fatal_err(adapter);
1496 * CPL switch interrupt handler.
1498 static void cplsw_intr_handler(adapter_t *adapter)
1500 static struct intr_info cplsw_intr_info[] = {
1501 // { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1502 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1503 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1504 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1505 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1509 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1510 cplsw_intr_info, NULL))
1511 t3_fatal_err(adapter);
1515 * MPS interrupt handler.
1517 static void mps_intr_handler(adapter_t *adapter)
1519 static struct intr_info mps_intr_info[] = {
1520 { 0x1ff, "MPS parity error", -1, 1 },
1524 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1525 mps_intr_info, NULL))
1526 t3_fatal_err(adapter);
1529 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1532 * MC7 interrupt handler.
1534 static void mc7_intr_handler(struct mc7 *mc7)
1536 adapter_t *adapter = mc7->adapter;
1537 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1540 mc7->stats.corr_err++;
1541 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1542 "data 0x%x 0x%x 0x%x\n", mc7->name,
1543 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1544 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1545 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1546 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1550 mc7->stats.uncorr_err++;
1551 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1552 "data 0x%x 0x%x 0x%x\n", mc7->name,
1553 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1554 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1555 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1556 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1560 mc7->stats.parity_err++;
1561 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1562 mc7->name, G_PE(cause));
1568 if (adapter->params.rev > 0)
1569 addr = t3_read_reg(adapter,
1570 mc7->offset + A_MC7_ERR_ADDR);
1571 mc7->stats.addr_err++;
1572 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1576 if (cause & MC7_INTR_FATAL)
1577 t3_fatal_err(adapter);
1579 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1582 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1583 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1585 * XGMAC interrupt handler.
1587 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
1592 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
1593 mac = &adap2pinfo(adap, idx)->mac;
1594 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1596 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1597 mac->stats.tx_fifo_parity_err++;
1598 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1600 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1601 mac->stats.rx_fifo_parity_err++;
1602 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1604 if (cause & F_TXFIFO_UNDERRUN)
1605 mac->stats.tx_fifo_urun++;
1606 if (cause & F_RXFIFO_OVERFLOW)
1607 mac->stats.rx_fifo_ovfl++;
1608 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1609 mac->stats.serdes_signal_loss++;
1610 if (cause & F_XAUIPCSCTCERR)
1611 mac->stats.xaui_pcs_ctc_err++;
1612 if (cause & F_XAUIPCSALIGNCHANGE)
1613 mac->stats.xaui_pcs_align_change++;
1615 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1616 if (cause & XGM_INTR_FATAL)
1622 * Interrupt handler for PHY events.
1624 int t3_phy_intr_handler(adapter_t *adapter)
1626 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1627 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1629 for_each_port(adapter, i) {
1630 struct port_info *p = adap2pinfo(adapter, i);
1632 mask = gpi - (gpi & (gpi - 1));
1635 if (!(p->port_type->caps & SUPPORTED_IRQ))
1639 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1641 if (phy_cause & cphy_cause_link_change)
1642 t3_link_changed(adapter, i);
1643 if (phy_cause & cphy_cause_fifo_error)
1644 p->phy.fifo_errors++;
1648 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1653 * t3_slow_intr_handler - control path interrupt handler
1654 * @adapter: the adapter
1656 * T3 interrupt handler for non-data interrupt events, e.g., errors.
1657 * The designation 'slow' is because it involves register reads, while
1658 * data interrupts typically don't involve any MMIOs.
1660 int t3_slow_intr_handler(adapter_t *adapter)
1662 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1664 cause &= adapter->slow_intr_mask;
1667 if (cause & F_PCIM0) {
1668 if (is_pcie(adapter))
1669 pcie_intr_handler(adapter);
1671 pci_intr_handler(adapter);
1674 t3_sge_err_intr_handler(adapter);
1675 if (cause & F_MC7_PMRX)
1676 mc7_intr_handler(&adapter->pmrx);
1677 if (cause & F_MC7_PMTX)
1678 mc7_intr_handler(&adapter->pmtx);
1679 if (cause & F_MC7_CM)
1680 mc7_intr_handler(&adapter->cm);
1682 cim_intr_handler(adapter);
1684 tp_intr_handler(adapter);
1685 if (cause & F_ULP2_RX)
1686 ulprx_intr_handler(adapter);
1687 if (cause & F_ULP2_TX)
1688 ulptx_intr_handler(adapter);
1689 if (cause & F_PM1_RX)
1690 pmrx_intr_handler(adapter);
1691 if (cause & F_PM1_TX)
1692 pmtx_intr_handler(adapter);
1693 if (cause & F_CPL_SWITCH)
1694 cplsw_intr_handler(adapter);
1696 mps_intr_handler(adapter);
1698 t3_mc5_intr_handler(&adapter->mc5);
1699 if (cause & F_XGMAC0_0)
1700 mac_intr_handler(adapter, 0);
1701 if (cause & F_XGMAC0_1)
1702 mac_intr_handler(adapter, 1);
1703 if (cause & F_T3DBG)
1704 t3_os_ext_intr_handler(adapter);
1706 /* Clear the interrupts just processed. */
1707 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1708 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1713 * t3_intr_enable - enable interrupts
1714 * @adapter: the adapter whose interrupts should be enabled
1716 * Enable interrupts by setting the interrupt enable registers of the
1717 * various HW modules and then enabling the top-level interrupt
1720 void t3_intr_enable(adapter_t *adapter)
1722 static struct addr_val_pair intr_en_avp[] = {
1723 { A_SG_INT_ENABLE, SGE_INTR_MASK },
1724 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
1725 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1727 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1729 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1730 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1731 { A_TP_INT_ENABLE, 0x3bfffff },
1732 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1733 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1734 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1735 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
1738 adapter->slow_intr_mask = PL_INTR_MASK;
1740 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1742 if (adapter->params.rev > 0) {
1743 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1744 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1745 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1746 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1747 F_PBL_BOUND_ERR_CH1);
1749 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1750 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1753 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1754 adapter_info(adapter)->gpio_intr);
1755 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1756 adapter_info(adapter)->gpio_intr);
1757 if (is_pcie(adapter))
1758 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1760 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1761 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1762 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1766 * t3_intr_disable - disable a card's interrupts
1767 * @adapter: the adapter whose interrupts should be disabled
1769 * Disable interrupts. We only disable the top-level interrupt
1770 * concentrator and the SGE data interrupts.
1772 void t3_intr_disable(adapter_t *adapter)
1774 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1775 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1776 adapter->slow_intr_mask = 0;
1780 * t3_intr_clear - clear all interrupts
1781 * @adapter: the adapter whose interrupts should be cleared
1783 * Clears all interrupts.
1785 void t3_intr_clear(adapter_t *adapter)
1787 static const unsigned int cause_reg_addr[] = {
1789 A_SG_RSPQ_FL_STATUS,
1792 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1793 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1794 A_CIM_HOST_INT_CAUSE,
1807 /* Clear PHY and MAC interrupts for each port. */
1808 for_each_port(adapter, i)
1809 t3_port_intr_clear(adapter, i);
1811 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1812 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1814 if (is_pcie(adapter))
1815 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1816 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1817 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1821 * t3_port_intr_enable - enable port-specific interrupts
1822 * @adapter: associated adapter
1823 * @idx: index of port whose interrupts should be enabled
1825 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1828 void t3_port_intr_enable(adapter_t *adapter, int idx)
1830 struct port_info *pi = adap2pinfo(adapter, idx);
1832 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
1833 pi->phy.ops->intr_enable(&pi->phy);
1837 * t3_port_intr_disable - disable port-specific interrupts
1838 * @adapter: associated adapter
1839 * @idx: index of port whose interrupts should be disabled
1841 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1844 void t3_port_intr_disable(adapter_t *adapter, int idx)
1846 struct port_info *pi = adap2pinfo(adapter, idx);
1848 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
1849 pi->phy.ops->intr_disable(&pi->phy);
1853 * t3_port_intr_clear - clear port-specific interrupts
1854 * @adapter: associated adapter
1855 * @idx: index of port whose interrupts to clear
1857 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1860 void t3_port_intr_clear(adapter_t *adapter, int idx)
1862 struct port_info *pi = adap2pinfo(adapter, idx);
1864 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
1865 pi->phy.ops->intr_clear(&pi->phy);
1868 #define SG_CONTEXT_CMD_ATTEMPTS 100
1871 * t3_sge_write_context - write an SGE context
1872 * @adapter: the adapter
1873 * @id: the context id
1874 * @type: the context type
1876 * Program an SGE context with the values already loaded in the
1877 * CONTEXT_DATA? registers.
1879 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
1882 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1883 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1884 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1885 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1886 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1887 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1888 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1889 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1893 * t3_sge_init_ecntxt - initialize an SGE egress context
1894 * @adapter: the adapter to configure
1895 * @id: the context id
1896 * @gts_enable: whether to enable GTS for the context
1897 * @type: the egress context type
1898 * @respq: associated response queue
1899 * @base_addr: base address of queue
1900 * @size: number of queue entries
1902 * @gen: initial generation value for the context
1903 * @cidx: consumer pointer
1905 * Initialize an SGE egress context and make it ready for use. If the
1906 * platform allows concurrent context operations, the caller is
1907 * responsible for appropriate locking.
1909 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1910 enum sge_context_type type, int respq, u64 base_addr,
1911 unsigned int size, unsigned int token, int gen,
1914 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1916 if (base_addr & 0xfff) /* must be 4K aligned */
1918 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1922 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1923 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1924 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1925 V_EC_BASE_LO((u32)base_addr & 0xffff));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1930 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
1931 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1933 return t3_sge_write_context(adapter, id, F_EGRESS);
1937 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1938 * @adapter: the adapter to configure
1939 * @id: the context id
1940 * @gts_enable: whether to enable GTS for the context
1941 * @base_addr: base address of queue
1942 * @size: number of queue entries
1943 * @bsize: size of each buffer for this queue
1944 * @cong_thres: threshold to signal congestion to upstream producers
1945 * @gen: initial generation value for the context
1946 * @cidx: consumer pointer
1948 * Initialize an SGE free list context and make it ready for use. The
1949 * caller is responsible for ensuring only one context operation occurs
1952 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1953 u64 base_addr, unsigned int size, unsigned int bsize,
1954 unsigned int cong_thres, int gen, unsigned int cidx)
1956 if (base_addr & 0xfff) /* must be 4K aligned */
1958 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1962 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
1964 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1965 V_FL_BASE_HI((u32)base_addr) |
1966 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1967 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1968 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1969 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1970 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1971 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1972 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1973 return t3_sge_write_context(adapter, id, F_FREELIST);
1977 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1978 * @adapter: the adapter to configure
1979 * @id: the context id
1980 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1981 * @base_addr: base address of queue
1982 * @size: number of queue entries
1983 * @fl_thres: threshold for selecting the normal or jumbo free list
1984 * @gen: initial generation value for the context
1985 * @cidx: consumer pointer
1987 * Initialize an SGE response queue context and make it ready for use.
1988 * The caller is responsible for ensuring only one context operation
1991 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
1992 u64 base_addr, unsigned int size,
1993 unsigned int fl_thres, int gen, unsigned int cidx)
1995 unsigned int intr = 0;
1997 if (base_addr & 0xfff) /* must be 4K aligned */
1999 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2005 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2007 if (irq_vec_idx >= 0)
2008 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2009 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2010 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2011 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2012 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2016 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2017 * @adapter: the adapter to configure
2018 * @id: the context id
2019 * @base_addr: base address of queue
2020 * @size: number of queue entries
2021 * @rspq: response queue for async notifications
2022 * @ovfl_mode: CQ overflow mode
2023 * @credits: completion queue credits
2024 * @credit_thres: the credit threshold
2026 * Initialize an SGE completion queue context and make it ready for use.
2027 * The caller is responsible for ensuring only one context operation
2030 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2031 unsigned int size, int rspq, int ovfl_mode,
2032 unsigned int credits, unsigned int credit_thres)
2034 if (base_addr & 0xfff) /* must be 4K aligned */
2036 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2040 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2041 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2043 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2044 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2045 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2046 V_CQ_ERR(ovfl_mode));
2047 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2048 V_CQ_CREDIT_THRES(credit_thres));
2049 return t3_sge_write_context(adapter, id, F_CQ);
2053 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2054 * @adapter: the adapter
2055 * @id: the egress context id
2056 * @enable: enable (1) or disable (0) the context
2058 * Enable or disable an SGE egress context. The caller is responsible for
2059 * ensuring only one context operation occurs at a time.
2061 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2063 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2066 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2067 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2068 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2069 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2070 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2071 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2072 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2073 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2074 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2078 * t3_sge_disable_fl - disable an SGE free-buffer list
2079 * @adapter: the adapter
2080 * @id: the free list context id
2082 * Disable an SGE free-buffer list. The caller is responsible for
2083 * ensuring only one context operation occurs at a time.
2085 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2087 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2090 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2091 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2092 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2093 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2094 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2095 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2096 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2097 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2098 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2102 * t3_sge_disable_rspcntxt - disable an SGE response queue
2103 * @adapter: the adapter
2104 * @id: the response queue context id
2106 * Disable an SGE response queue. The caller is responsible for
2107 * ensuring only one context operation occurs at a time.
2109 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2111 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2114 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2115 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2116 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2117 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2118 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2119 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2120 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2121 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2122 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2126 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2127 * @adapter: the adapter
2128 * @id: the completion queue context id
2130 * Disable an SGE completion queue. The caller is responsible for
2131 * ensuring only one context operation occurs at a time.
2133 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2135 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2139 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2140 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2141 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2142 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2143 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2144 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2145 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2146 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2150 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @op: the operation to perform
2154 * @credits: credits to return to the CQ
2156 * Perform the selected operation on an SGE completion queue context.
2157 * The caller is responsible for ensuring only one context operation
2160 * For most operations the function returns the current HW position in
2161 * the completion queue.
2163 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2164 unsigned int credits)
2168 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2171 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2172 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2173 V_CONTEXT(id) | F_CQ);
2174 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2175 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2178 if (op >= 2 && op < 7) {
2179 if (adapter->params.rev > 0)
2180 return G_CQ_INDEX(val);
2182 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2183 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2184 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2185 F_CONTEXT_CMD_BUSY, 0,
2186 SG_CONTEXT_CMD_ATTEMPTS, 1))
2188 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
/* NOTE(review): elided listing — braces and error returns missing from
 * this excerpt; code lines kept byte-identical. */
2194  * t3_sge_read_context - read an SGE context
2195  * @type: the context type
2196  * @adapter: the adapter
2197  * @id: the context id
2198  * @data: holds the retrieved context
2200  * Read an SGE egress context. The caller is responsible for ensuring
2201  * only one context operation occurs at a time.
2203 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2204 unsigned int id, u32 data[4])
2206 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = read; @type selects egress/CQ/freelist/response queue. */
2209 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2210 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2211 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2212 SG_CONTEXT_CMD_ATTEMPTS, 1))
/* The 128-bit context is returned in four 32-bit data registers. */
2214 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2215 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2216 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2217 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
/* NOTE(review): elided listing — the id-range check visible in the
 * sibling wrappers is presumably elided here too; kept byte-identical. */
2222  * t3_sge_read_ecntxt - read an SGE egress context
2223  * @adapter: the adapter
2224  * @id: the context id
2225  * @data: holds the retrieved context
2227  * Read an SGE egress context. The caller is responsible for ensuring
2228  * only one context operation occurs at a time.
2230 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2234 return t3_sge_read_context(F_EGRESS, adapter, id, data);
/* Thin wrapper: delegates to t3_sge_read_context with the CQ type bit. */
2238  * t3_sge_read_cq - read an SGE CQ context
2239  * @adapter: the adapter
2240  * @id: the context id
2241  * @data: holds the retrieved context
2243  * Read an SGE CQ context. The caller is responsible for ensuring
2244  * only one context operation occurs at a time.
2246 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2250 return t3_sge_read_context(F_CQ, adapter, id, data);
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2254  * t3_sge_read_fl - read an SGE free-list context
2255  * @adapter: the adapter
2256  * @id: the context id
2257  * @data: holds the retrieved context
2259  * Read an SGE free-list context. The caller is responsible for ensuring
2260  * only one context operation occurs at a time.
2262 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2264 if (id >= SGE_QSETS * 2)
2266 return t3_sge_read_context(F_FREELIST, adapter, id, data);
/* One response queue per queue set, hence the SGE_QSETS bound. */
2270  * t3_sge_read_rspq - read an SGE response queue context
2271  * @adapter: the adapter
2272  * @id: the context id
2273  * @data: holds the retrieved context
2275  * Read an SGE response queue context. The caller is responsible for
2276  * ensuring only one context operation occurs at a time.
2278 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2280 if (id >= SGE_QSETS)
2282 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
/* NOTE(review): elided listing — the `u16 val` declaration, the index
 * wrap-around on hitting the 0xff/0xffff terminators, and closing braces
 * are not visible in this excerpt; code kept byte-identical. */
2286  * t3_config_rss - configure Rx packet steering
2287  * @adapter: the adapter
2288  * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2289  * @cpus: values for the CPU lookup table (0xff terminated)
2290  * @rspq: values for the response queue lookup table (0xffff terminated)
2292  * Programs the receive packet steering logic. @cpus and @rspq provide
2293  * the values for the CPU and response queue lookup tables. If they
2294  * provide fewer values than the size of the tables the supplied values
2295  * are used repeatedly until the tables are fully populated.
2297 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2300 int i, j, cpu_idx = 0, q_idx = 0;
2303 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
/* Each LKP entry packs two 6-bit CPU values, one per byte lane. */
2306 for (j = 0; j < 2; ++j) {
2307 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* Terminator check reads the NEXT entry (post-increment above). */
2308 if (cpus[cpu_idx] == 0xff)
2311 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2315 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2316 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2317 (i << 16) | rspq[q_idx++]);
2318 if (rspq[q_idx] == 0xffff)
2322 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
/* NOTE(review): elided listing — the write arguments that select the
 * table entry and the error/continue paths after the valid-bit test
 * (bit 31) are missing from this excerpt; code kept byte-identical. */
2326  * t3_read_rss - read the contents of the RSS tables
2327  * @adapter: the adapter
2328  * @lkup: holds the contents of the RSS lookup table
2329  * @map: holds the contents of the RSS map table
2331  * Reads the contents of the receive packet steering tables.
2333 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2339 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2340 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2342 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
/* Bit 31 appears to act as a read-valid flag — TODO confirm. */
2343 if (!(val & 0x80000000))
2346 *lkup++ = (u8)(val >> 8);
2350 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2351 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2353 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2354 if (!(val & 0x80000000))
/* Only touches NICMODE when offload is supported or when disabling,
 * so a non-offload adapter is never switched out of NIC mode. */
2362  * t3_tp_set_offload_mode - put TP in NIC/offload mode
2363  * @adap: the adapter
2364  * @enable: 1 to select offload mode, 0 for regular NIC
2366  * Switches TP to NIC/offload mode.
2368 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2370 if (is_offload(adap) || !enable)
2371 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2372 V_NICMODE(!enable));
/* Read-modify-write through the TP PIO window: preserve bits outside
 * @mask, OR in @val. Caller must serialize access to the PIO window. */
2376  * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2377  * @adap: the adapter
2378  * @addr: the indirect TP register address
2379  * @mask: specifies the field within the register to modify
2380  * @val: new value for the field
2382  * Sets a field of an indirect TP register to the given value.
2384 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
2385 unsigned int mask, unsigned int val)
2387 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2388 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2389 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* Leaves NIC mode (clears F_NICMODE), enables the MC5 filter block and
 * 5-tuple lookup, and makes TP consult the lookup tables on every packet. */
2393  * t3_enable_filters - enable the HW filters
2394  * @adap: the adapter
2396  * Enables the HW filters for NIC traffic.
2398 void t3_enable_filters(adapter_t *adap)
2400 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
2401 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
2402 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
2403 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
/* NOTE(review): the rounding of n down to a multiple of 24 and the
 * return statement are elided from this excerpt. */
2407  * pm_num_pages - calculate the number of pages of the payload memory
2408  * @mem_size: the size of the payload memory
2409  * @pg_size: the size of each payload memory page
2411  * Calculate the number of pages, each of the given size, that fit in a
2412  * memory of the specified size, respecting the HW requirement that the
2413  * number of pages must be a multiple of 24.
2415 static inline unsigned int pm_num_pages(unsigned int mem_size,
2416 unsigned int pg_size)
2418 unsigned int n = mem_size / pg_size;
/* Helper macro for partition_mem(): programs a region base register and
 * (in the elided continuation line) advances the running offset `start`
 * by `size`. NOTE(review): the macro body is truncated in this excerpt. */
2423 #define mem_region(adap, start, size, reg) \
2424 t3_write_reg((adap), A_ ## reg, (start)); \
/* NOTE(review): elided listing — the timers/timers_shift assignments in
 * the tid-size ladder, the pstructs headroom addition, and several
 * closing braces are missing from this excerpt; code kept byte-identical. */
2428  * partition_mem - partition memory and configure TP memory settings
2429  * @adap: the adapter
2430  * @p: the TP parameters
2432  * Partitions context and payload memory and configures TP's memory
2435 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2437 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2438 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0 parts scale the timer region with the number of TIDs. */
2440 if (adap->params.rev > 0) {
2441 if (tids <= 16 * 1024) {
2444 } else if (tids <= 64 * 1024) {
2447 } else if (tids <= 256 * 1024) {
2453 t3_write_reg(adap, A_TP_PMM_SIZE,
2454 p->chan_rx_size | (p->chan_tx_size >> 16));
2456 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2457 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2458 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2459 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2460 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2462 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2463 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2464 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2466 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2467 /* Add a bit of headroom and make multiple of 24 */
2469 pstructs -= pstructs % 24;
2470 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out CM regions sequentially; mem_region() advances m by size. */
2472 m = tids * TCB_SIZE;
2473 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2474 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2475 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2476 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2477 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2478 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2479 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2480 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Align the CIM SDRAM base to a 4 KB boundary. */
2482 m = (m + 4095) & ~0xfff;
2483 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2484 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2486 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2487 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2488 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
/* Give leftover TCAM entries to the server region — TODO confirm
 * the elided guard condition above this adjustment. */
2490 adap->params.mc5.nservers += m - tids;
/* Write @val to indirect TP register @addr via the PIO address/data
 * window. Caller must serialize access to the window. */
2493 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
2495 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2496 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (protocol engine) configuration: checksum offloads, TCP
 * option defaults, delayed-ACK behavior, pacing, and (for >2-port
 * adapters) ingress extraction/insertion setup.
 * NOTE(review): elided listing — some argument lines, the else branch
 * around line 2535, and closing braces are missing from this excerpt. */
2499 static void tp_config(adapter_t *adap, const struct tp_params *p)
2501 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2502 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2503 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2504 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2505 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2506 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2507 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2508 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2509 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2510 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2511 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2512 F_IPV6ENABLE | F_NICMODE);
2513 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2514 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2515 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2516 adap->params.rev > 0 ? F_ENABLEESND :
/* F_HEARBEATDACK: spelling matches the register-constant name from the
 * hardware headers — presumably intentional; do not "fix". */
2518 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2520 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2521 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2522 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2523 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2524 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev > 0: automatic Tx pacing; older silicon uses fixed pacing. */
2526 if (adap->params.rev > 0) {
2527 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2528 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
2529 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
2530 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2531 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
2532 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
2533 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
2535 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2537 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2538 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2539 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2540 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
/* Extra ingress/egress port-steering setup for 4-port adapters. */
2542 if (adap->params.nports > 2) {
2543 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
2544 F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
2545 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
2546 V_RXMAPMODE(M_RXMAPMODE), 0);
2547 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
2548 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2549 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
2550 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
2551 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
2552 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
2553 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
2557 /* TCP timer values in ms */
2558 #define TP_DACK_TIMER 50
2559 #define TP_RTO_MIN 250
2562  * tp_set_timers - set TP timing parameters
2563  * @adap: the adapter to set
2564  * @core_clk: the core clock frequency in Hz
2566  * Set TP's timing parameters, such as the various timer resolutions and
2567  * the TCP timer values.
2569 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2571 unsigned int tre = adap->params.tp.tre;
2572 unsigned int dack_re = adap->params.tp.dack_re;
2573 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps = TP timer ticks per second at resolution `tre`. */
2574 unsigned int tps = core_clk >> tre;
2576 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2577 V_DELAYEDACKRESOLUTION(dack_re) |
2578 V_TIMESTAMPRESOLUTION(tstamp_re));
2579 t3_write_reg(adap, A_TP_DACK_TIMER,
2580 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff shift table, 4 entries per register. */
2581 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2582 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2583 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2584 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2585 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2586 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2587 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* `N SECONDS` expands to `N * tps` (ticks); #undef'd after use
 * (the #undef line is elided from this excerpt). */
2590 #define SECONDS * tps
2592 t3_write_reg(adap, A_TP_MSL,
2593 adap->params.rev > 0 ? 0 : 2 SECONDS);
2594 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2595 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2596 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2597 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2598 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2599 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2600 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2601 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2606 #ifdef CONFIG_CHELSIO_T3_CORE
/* NOTE(review): elided listing — the `u32 val` declaration, error
 * returns, and the conditionals around the enable/PSH bits are partly
 * missing from this excerpt. */
2608  * t3_tp_set_coalescing_size - set receive coalescing size
2609  * @adap: the adapter
2610  * @size: the receive coalescing size
2611  * @psh: whether a set PSH bit should deliver coalesced data
2613  * Set the receive coalescing size and PSH bit handling.
2615 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2619 if (size > MAX_RX_COALESCING_LEN)
2622 val = t3_read_reg(adap, A_TP_PARA_REG3);
2623 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2626 val |= F_RXCOALESCEENABLE;
2628 val |= F_RXCOALESCEPSHEN;
2629 size = min(MAX_RX_COALESCING_LEN, size);
2630 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2631 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2633 t3_write_reg(adap, A_TP_PARA_REG3, val);
/* Same limit is programmed for both PM transfer-length channels. */
2638  * t3_tp_set_max_rxsize - set the max receive size
2639  * @adap: the adapter
2640  * @size: the max receive size
2642  * Set TP's max receive size. This is the limit that applies when
2643  * receive coalescing is disabled.
2645 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2647 t3_write_reg(adap, A_TP_PARA_REG7,
2648 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Fills the default MTU table. NOTE(review): the table-assignment body
 * (original lines ~2657-2676) is entirely elided from this excerpt. */
2651 static void __devinit init_mtus(unsigned short mtus[])
2654  * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2655  * it can accomodate max size TCP/IP headers when SACK and timestamps
2656  * are enabled and still have at least 8 bytes of payload.
/* NOTE(review): entries a[9..] and b[9..12], b[28..] are elided from
 * this excerpt; visible assignments kept byte-identical. */
2677  * init_cong_ctrl - initialize congestion control parameters
2678  * @a: the alpha values for congestion control
2679  * @b: the beta values for congestion control
2681  * Initialize the congestion control parameters.
2683 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2685 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2710 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2713 b[13] = b[14] = b[15] = b[16] = 3;
2714 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2715 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2720 /* The minimum additive increment value for the congestion control table */
2721 #define CC_MIN_INCR 2U
2724  * t3_load_mtus - write the MTU and congestion control HW tables
2725  * @adap: the adapter
2726  * @mtus: the unrestricted values for the MTU table
2727  * @alpha: the values for the congestion control alpha parameter
2728  * @beta: the values for the congestion control beta parameter
2729  * @mtu_cap: the maximum permitted effective MTU
2731  * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2732  * Update the high-speed congestion control table with the supplied alpha,
2735 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2736 unsigned short alpha[NCCTRL_WIN],
2737 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, one entry per window size. */
2739 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2740 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2741 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2742 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2746 for (i = 0; i < NMTUS; ++i) {
2747 unsigned int mtu = min(mtus[i], mtu_cap);
2748 unsigned int log2 = fls(mtu);
/* Round log2 to the nearest power of two, not just floor(log2). */
2750 if (!(mtu & ((1 << log2) >> 2))) /* round */
2752 t3_write_reg(adap, A_TP_MTU_TABLE,
2753 (i << 24) | (log2 << 16) | mtu);
2755 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Additive increment scaled by alpha; floor at CC_MIN_INCR. */
2758 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2761 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2762 (w << 16) | (beta[w] << 13) | inc);
/* 0xff000000 selects read mode for MTU table entry i; low 14 bits of
 * the readback hold the MTU value. */
2768  * t3_read_hw_mtus - returns the values in the HW MTU table
2769  * @adap: the adapter
2770  * @mtus: where to store the HW MTU values
2772  * Reads the HW MTU table.
2774 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2778 for (i = 0; i < NMTUS; ++i) {
2781 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2782 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2783 mtus[i] = val & 0x3fff;
/* 0xffff0000 selects read mode; address is (mtu_index << 5) | window.
 * The 13-bit increment is masked out of the readback. */
2788  * t3_get_cong_cntl_tab - reads the congestion control table
2789  * @adap: the adapter
2790  * @incr: where to store the alpha values
2792  * Reads the additive increments programmed into the HW congestion
2795 void t3_get_cong_cntl_tab(adapter_t *adap,
2796 unsigned short incr[NMTUS][NCCTRL_WIN])
2798 unsigned int mtu, w;
2800 for (mtu = 0; mtu < NMTUS; ++mtu)
2801 for (w = 0; w < NCCTRL_WIN; ++w) {
2802 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2803 0xffff0000 | (mtu << 5) | w);
2804 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2805 A_TP_CCTRL_TABLE) & 0x1fff;
/* Bulk-reads the whole MIB block as an array of u32 words straight into
 * the stats struct — struct layout must match the HW counter order. */
2810  * t3_tp_get_mib_stats - read TP's MIB counters
2811  * @adap: the adapter
2812  * @tps: holds the returned counter values
2814  * Returns the values of TP's MIB counters.
2816 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
2818 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
2819 sizeof(*tps) / sizeof(u32), 0);
/* Entries are stored in delayed-ACK ticks; convert to ns on readout. */
2823  * t3_read_pace_tbl - read the pace table
2824  * @adap: the adapter
2825  * @pace_vals: holds the returned values
2827  * Returns the values of TP's pace table in nanoseconds.
2829 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
2831 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
2833 for (i = 0; i < NTX_SCHED; i++) {
2834 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2835 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
/* Converts each ns value to delayed-ACK ticks with round-to-nearest
 * (+ tick_ns/2) before writing entry `start`. */
2840  * t3_set_pace_tbl - set the pace table
2841  * @adap: the adapter
2842  * @pace_vals: the pace values in nanoseconds
2843  * @start: index of the first entry in the HW pace table to set
2844  * @n: how many entries to set
2846  * Sets (a subset of the) HW pace table.
2848 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
2849 unsigned int start, unsigned int n)
2851 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
2853 for ( ; n; n--, start++, pace_vals++)
2854 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
2855 ((*pace_vals + tick_ns / 2) / tick_ns));
/* Program the [LLIMIT, ULIMIT] window for a ULP RX/TX memory region.
 * NOTE(review): ulp_region's trailing continuation (which presumably
 * advances `start` by `len`) is elided from this excerpt. */
2858 #define ulp_region(adap, name, start, len) \
2859 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2860 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2861 (start) + (len) - 1); \
2864 #define ulptx_region(adap, name, start, len) \
2865 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2866 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2867 (start) + (len) - 1)
/* Carves the RX payload memory into ULP regions. PBL is programmed via
 * both ulptx_region and ulp_region — these hit different register banks
 * (ULPTX_ vs ULPRX_), so the apparent duplication looks intentional. */
2869 static void ulp_config(adapter_t *adap, const struct tp_params *p)
2871 unsigned int m = p->chan_rx_size;
2873 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2874 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2875 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2876 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2877 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2878 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2879 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2880 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
/* NOTE(review): elided listing — `int i;`, the error return after the
 * wait, and the final `return 0;` are missing from this excerpt. */
2885  * t3_set_proto_sram - set the contents of the protocol sram
2886  * @adapter: the adapter
2887  * @data: the protocol image
2889  * Write the contents of the protocol SRAM.
2891 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
2894 const u32 *buf = (const u32 *)data;
2896 for (i = 0; i < PROTO_SRAM_LINES; i++) {
/* Each SRAM line is 5 big-endian words, FIELD5 down to FIELD1. */
2897 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2898 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2899 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2900 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2901 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
/* Bit 31 = go, bits [.. :1] = line index, bit 0 = write. */
2903 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2904 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
/* NOTE(review): elided listing — key[2] (the dip field) and the
 * enable-bit conditionals around lines 2936-2940 are missing from this
 * excerpt; code kept byte-identical. */
2912  * t3_config_trace_filter - configure one of the tracing filters
2913  * @adapter: the adapter
2914  * @tp: the desired trace filter parameters
2915  * @filter_index: which filter to configure
2916  * @invert: if set non-matching packets are traced instead of matching ones
2917  * @enable: whether to enable or disable the filter
2919  * Configures one of the tracing filters available in HW.
2921 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
2922 int filter_index, int invert, int enable)
2924 u32 addr, key[4], mask[4];
/* Pack the 5-tuple (+ vlan/intf) into four key words, with a parallel
 * set of mask words. */
2926 key[0] = tp->sport | (tp->sip << 16);
2927 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2929 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2931 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2932 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2933 mask[2] = tp->dip_mask;
2934 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* key[3] bits 29/28 presumably carry invert/enable — TODO confirm
 * against the elided conditionals. */
2937 key[3] |= (1 << 29);
2939 key[3] |= (1 << 28);
/* Filter 0 programs the Tx trace key block, filter != 0 the Rx block;
 * key/mask words are written as interleaved pairs. */
2941 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2942 tp_wr_indirect(adapter, addr++, key[0]);
2943 tp_wr_indirect(adapter, addr++, mask[0]);
2944 tp_wr_indirect(adapter, addr++, key[1]);
2945 tp_wr_indirect(adapter, addr++, mask[1]);
2946 tp_wr_indirect(adapter, addr++, key[2]);
2947 tp_wr_indirect(adapter, addr++, mask[2]);
2948 tp_wr_indirect(adapter, addr++, key[3]);
2949 tp_wr_indirect(adapter, addr, mask[3]);
2950 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
/* NOTE(review): elided listing — the tps recomputation inside the loop,
 * the mindelta/selected_* updates, and the return are missing from this
 * excerpt; code kept byte-identical. */
2954  * t3_config_sched - configure a HW traffic scheduler
2955  * @adap: the adapter
2956  * @kbps: target rate in Kbps
2957  * @sched: the scheduler index
2959  * Configure a Tx HW scheduler for the target rate.
2961 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
2963 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2964 unsigned int clk = adap->params.vpd.cclk * 1000;
2965 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustive search over clocks-per-tick (cpt) and bytes-per-tick (bpt)
 * for the pair whose achieved rate is closest to the target. */
2968 kbps *= 125; /* -> bytes */
2969 for (cpt = 1; cpt <= 255; cpt++) {
2971 bpt = (kbps + tps / 2) / tps;
2972 if (bpt > 0 && bpt <= 255) {
2974 delta = v >= kbps ? v - kbps : kbps - v;
2975 if (delta <= mindelta) {
2980 } else if (selected_cpt)
/* Each rate-limit register holds two schedulers; even index in the low
 * half, odd in the high half. */
2986 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2987 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2988 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2990 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2992 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2993 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* NOTE(review): elided listing — the range check after the conversion
 * and the odd/even-scheduler conditional are partially missing. */
2998  * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
2999  * @adap: the adapter
3000  * @sched: the scheduler index
3001  * @ipg: the interpacket delay in tenths of nanoseconds
3003  * Set the interpacket delay for a HW packet rate scheduler.
3005 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3007 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3009 /* convert ipg to nearest number of core clocks */
3010 ipg *= core_ticks_per_usec(adap);
3011 ipg = (ipg + 5000) / 10000;
3015 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3016 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* Two schedulers per register: odd in the high 16 bits, even in low. */
3018 v = (v & 0xffff) | (ipg << 16);
3020 v = (v & 0xffff0000) | ipg;
3021 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
3022 t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* NOTE(review): elided listing — the shift selecting the odd scheduler's
 * half-word and the cpt extraction are missing from this excerpt. */
3027  * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3028  * @adap: the adapter
3029  * @sched: the scheduler index
3030  * @kbps: the byte rate in Kbps
3031  * @ipg: the interpacket delay in tenths of nanoseconds
3033  * Return the current configuration of a HW Tx scheduler.
3035 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3038 unsigned int v, addr, bpt, cpt;
3041 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3042 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3043 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3046 bpt = (v >> 8) & 0xff;
/* cpt == 0 means the scheduler is not rate-limiting. */
3049 *kbps = 0; /* scheduler disabled */
3051 v = (adap->params.vpd.cclk * 1000) / cpt;
3052 *kbps = (v * bpt) / 125;
3056 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3057 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3058 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* Convert core ticks back to tenths of nanoseconds. */
3062 *ipg = (10000 * v) / core_ticks_per_usec(adap);
/* NOTE(review): elided listing — `int busy = 0;`, the wait arguments,
 * and the return of `busy` are missing from this excerpt. */
3067  * tp_init - configure TP
3068  * @adap: the adapter
3069  * @p: TP configuration parameters
3071  * Initializes the TP HW module.
3073 static int tp_init(adapter_t *adap, const struct tp_params *p)
3078 t3_set_vlan_accel(adap, 3, 0);
3080 if (is_offload(adap)) {
3081 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
/* Kick the free-list initialization and wait for it to self-clear. */
3082 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3083 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3086 CH_ERR(adap, "TP initialization timed out\n");
3090 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Rejects bits beyond the adapter's port count, then maps the bitmap
 * onto the PORTnACTIVE field of MPS_CFG. */
3095  * t3_mps_set_active_ports - configure port failover
3096  * @adap: the adapter
3097  * @port_mask: bitmap of active ports
3099  * Sets the active ports according to the supplied bitmap.
3101 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3103 if (port_mask & ~((1 << adap->params.nports) - 1))
3105 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3106 port_mask << S_PORT0ACTIVE);
/* NOTE(review): elided listing — `int i;` and one OR-ed flag near line
 * 3142 are missing from this excerpt; code kept byte-identical. */
3111  * chan_init_hw - channel-dependent HW initialization
3112  * @adap: the adapter
3113  * @chan_map: bitmap of Tx channels being used
3115  * Perform the bits of HW initialization that are dependent on the Tx
3116  * channels being used.
3118 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3122 if (chan_map != 3) { /* one channel */
/* Single channel: no round-robin arbitration needed. */
3123 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3124 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3125 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3126 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3127 F_TPTXPORT1EN | F_PORT1ACTIVE));
3128 t3_write_reg(adap, A_PM1_TX_CFG,
3129 chan_map == 1 ? 0xffffffff : 0);
3131 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3132 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3133 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3134 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3135 } else { /* two channels */
3136 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3137 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3138 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3139 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3140 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3141 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
/* Split the Tx payload memory evenly between the two channels. */
3143 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3144 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3145 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3146 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3147 for (i = 0; i < 16; i++)
3148 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3149 (i << 16) | 0x1010);
3150 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3151 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
/* Run XGMAC impedance calibration. XAUI parts poll the autocalibration
 * engine (up to 5 tries); RGMII parts get fixed pull-up/down values.
 * NOTE(review): declarations, delays, and returns are elided here. */
3155 static int calibrate_xgm(adapter_t *adapter)
3157 if (uses_xaui(adapter)) {
3160 for (i = 0; i < 5; ++i) {
3161 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* Flush the posted write before polling. */
3162 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3164 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3165 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3166 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3167 V_XAUIIMP(G_CALIMP(v) >> 2));
3171 CH_ERR(adapter, "MAC calibration failed\n");
3174 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3175 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3176 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3177 F_XGM_IMPSETUPDATE);
/* T3B variant of the RGMII impedance calibration: pulse CALRESET, then
 * toggle IMPSETUPDATE and CALUPDATE to latch the values. */
3182 static void calibrate_xgm_t3b(adapter_t *adapter)
3184 if (!uses_xaui(adapter)) {
3185 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3186 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3187 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3188 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3189 F_XGM_IMPSETUPDATE);
3190 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3192 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3193 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DDR SDRAM timing parameters for one MC7 speed grade; RefCyc is
 * indexed by the density field read from MC7_CFG. */
3197 struct mc7_timing_params {
3198 unsigned char ActToPreDly;
3199 unsigned char ActToRdWrDly;
3200 unsigned char PreCyc;
3201 unsigned char RefCyc[5];
3202 unsigned char BkCyc;
3203 unsigned char WrToRdDly;
3204 unsigned char RdToWrDly;
/* NOTE(review): the success return and the final error return value are
 * elided from this excerpt; code kept byte-identical. */
3208  * Write a value to a register and check that the write completed. These
3209  * writes normally complete in a cycle or two, so one read should suffice.
3210  * The very first read exists to flush the posted write to the device.
3212 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3214 t3_write_reg(adapter, addr, val);
3215 (void) t3_read_reg(adapter, addr); /* flush */
3216 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3218 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/* Bring up one MC7 memory controller: calibrate, program DDR timing,
 * run the standard DDR init sequence (precharge / EMRS / MRS / refresh),
 * enable periodic refresh and ECC, then BIST-clear the whole array.
 * NOTE(review): elided listing — `u32 val;`, delay calls, several goto
 * out/error paths, and the returns are missing from this excerpt. */
3222 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3224 static const unsigned int mc7_mode[] = {
3225 0x632, 0x642, 0x652, 0x432, 0x442
3227 static const struct mc7_timing_params mc7_timings[] = {
3228 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3229 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3230 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3231 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3232 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3236 unsigned int width, density, slow, attempts;
3237 adapter_t *adapter = mc7->adapter;
3238 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3243 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3244 slow = val & F_SLOW;
3245 width = G_WIDTH(val);
3246 density = G_DEN(val);
3248 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3249 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot calibration; SGL_CAL_EN self-clears when done. */
3253 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3254 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3256 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3257 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3258 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3264 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3265 V_ACTTOPREDLY(p->ActToPreDly) |
3266 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3267 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3268 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3270 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3271 val | F_CLKEN | F_TERM150);
3272 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3275 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DDR power-up sequence: precharge-all, program extended mode
 * registers, then the mode register. */
3280 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3281 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3282 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3283 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3287 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3288 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3293 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3294 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3295 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3296 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3297 mc7_mode[mem_type]) ||
3298 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3299 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3302 /* clock value is in KHz */
3303 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3304 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3306 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3307 F_PERREFEN | V_PREREFDIV(mc7_clock));
3308 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3310 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
3311 F_ECCGENEN | F_ECCCHKEN);
/* BIST write of zeros over the full address range clears memory and
 * initializes the ECC bits. */
3312 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3313 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3314 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3315 (mc7->size << width) - 1);
3316 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3317 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3322 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3323 } while ((val & F_BUSY) && --attempts);
3325 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3329 /* Enable normal memory accesses. */
3330 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3337 static void config_pcie(adapter_t *adap)
3339 static const u16 ack_lat[4][6] = {
3340 { 237, 416, 559, 1071, 2095, 4143 },
3341 { 128, 217, 289, 545, 1057, 2081 },
3342 { 73, 118, 154, 282, 538, 1050 },
3343 { 67, 107, 86, 150, 278, 534 }
3345 static const u16 rpl_tmr[4][6] = {
3346 { 711, 1248, 1677, 3213, 6285, 12429 },
3347 { 384, 651, 867, 1635, 3171, 6243 },
3348 { 219, 354, 462, 846, 1614, 3150 },
3349 { 201, 321, 258, 450, 834, 1602 }
3353 unsigned int log2_width, pldsize;
3354 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3356 t3_os_pci_read_config_2(adap,
3357 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3359 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3361 t3_os_pci_read_config_2(adap,
3362 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3365 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3366 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3367 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3368 log2_width = fls(adap->params.pci.width) - 1;
3369 acklat = ack_lat[log2_width][pldsize];
3370 if (val & 1) /* check LOsEnable */
3371 acklat += fst_trn_tx * 4;
3372 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3374 if (adap->params.rev == 0)
3375 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3376 V_T3A_ACKLAT(M_T3A_ACKLAT),
3377 V_T3A_ACKLAT(acklat));
3379 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3382 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3383 V_REPLAYLMT(rpllmt));
3385 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3386 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
/**
 *	t3_init_hw - initialize and configure T3 HW modules
 *	@adapter: the adapter
 *	@fw_params: initial parameters to pass to firmware (optional)
 *
 *	Initialize and configure T3 HW modules.  This performs the
 *	initialization steps that need to be done once after a card is reset.
 *	MAC and PHY initialization is handled separately whenever a port is
 *	enabled.
 *
 *	@fw_params are passed to FW and their value is platform dependent.
 *	Only the top 8 bits are available for use, the rest must be 0.
 */
3402 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3404 int err = -EIO, attempts = 100;
3405 const struct vpd_params *vpd = &adapter->params.vpd;
3407 if (adapter->params.rev > 0)
3408 calibrate_xgm_t3b(adapter);
3409 else if (calibrate_xgm(adapter))
3412 if (adapter->params.nports > 2)
3413 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3416 partition_mem(adapter, &adapter->params.tp);
3418 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3419 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3420 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3421 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3422 adapter->params.mc5.nfilters,
3423 adapter->params.mc5.nroutes))
3427 if (tp_init(adapter, &adapter->params.tp))
3430 #ifdef CONFIG_CHELSIO_T3_CORE
3431 t3_tp_set_coalescing_size(adapter,
3432 min(adapter->params.sge.max_pkt_size,
3433 MAX_RX_COALESCING_LEN), 1);
3434 t3_tp_set_max_rxsize(adapter,
3435 min(adapter->params.sge.max_pkt_size, 16384U));
3436 ulp_config(adapter, &adapter->params.tp);
3438 if (is_pcie(adapter))
3439 config_pcie(adapter);
3441 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3443 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3444 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3445 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3446 chan_init_hw(adapter, adapter->params.chan_map);
3447 t3_sge_init(adapter, &adapter->params.sge);
3449 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3450 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3451 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3452 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3454 do { /* wait for uP to initialize */
3456 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3458 CH_ERR(adapter, "uP initialization timed out\n");
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
3475 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3477 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3478 u32 pci_mode, pcie_cap;
3480 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3484 p->variant = PCI_VARIANT_PCIE;
3485 p->pcie_cap_addr = pcie_cap;
3486 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3488 p->width = (val >> 4) & 0x3f;
3492 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3493 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3494 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3495 pci_mode = G_PCIXINITPAT(pci_mode);
3497 p->variant = PCI_VARIANT_PCI;
3498 else if (pci_mode < 4)
3499 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3500 else if (pci_mode < 8)
3501 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3503 p->variant = PCI_VARIANT_PCIX_266_MODE2;
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	parameters.
 */
3515 static void __devinit init_link_config(struct link_config *lc,
3518 lc->supported = caps;
3519 lc->requested_speed = lc->speed = SPEED_INVALID;
3520 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3521 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3522 if (lc->supported & SUPPORTED_Autoneg) {
3523 lc->advertising = lc->supported;
3524 lc->autoneg = AUTONEG_ENABLE;
3525 lc->requested_fc |= PAUSE_AUTONEG;
3527 lc->advertising = 0;
3528 lc->autoneg = AUTONEG_DISABLE;
/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
3539 static unsigned int __devinit mc7_calc_size(u32 cfg)
3541 unsigned int width = G_WIDTH(cfg);
3542 unsigned int banks = !!(cfg & F_BKS) + 1;
3543 unsigned int org = !!(cfg & F_ORG) + 1;
3544 unsigned int density = G_DEN(cfg);
3545 unsigned int MBs = ((256 << density) * banks) / (org << width);
3550 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3551 unsigned int base_addr, const char *name)
3555 mc7->adapter = adapter;
3557 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3558 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3559 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3560 mc7->width = G_WIDTH(cfg);
3563 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3565 mac->adapter = adapter;
3566 mac->multiport = adapter->params.nports > 2;
3567 if (mac->multiport) {
3568 mac->ext_port = (unsigned char)index;
3574 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3576 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3577 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3578 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3579 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/**
 *	early_hw_init - HW initialization done at card detection time
 *	@adapter: the adapter
 *	@ai: contains information about the adapter type and properties
 *
 *	Performs the part of HW initialization that is done early on when the
 *	driver first detects the card.  Most of the HW state is initialized
 *	lazily later on when a port or an offload function are first used.
 */
3593 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3595 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3598 mi1_init(adapter, ai);
3599 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3600 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3601 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3602 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3603 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3605 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3608 /* Enable MAC clocks so we can access the registers */
3609 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3610 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3612 val |= F_CLKDIVRESET_;
3613 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3614 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3615 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3616 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
/**
 *	t3_reset_adapter - reset the adapter
 *	@adapter: the adapter
 *
 *	Reset the adapter.
 */
3625 static int t3_reset_adapter(adapter_t *adapter)
3627 int i, save_and_restore_pcie =
3628 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3631 if (save_and_restore_pcie)
3632 t3_os_pci_save_state(adapter);
3633 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3636 * Delay. Give Some time to device to reset fully.
3637 * XXX The delay time should be modified.
3639 for (i = 0; i < 10; i++) {
3641 t3_os_pci_read_config_2(adapter, 0x00, &devid);
3642 if (devid == 0x1425)
3646 if (devid != 0x1425)
3649 if (save_and_restore_pcie)
3650 t3_os_pci_restore_state(adapter);
/**
 *	t3_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@ai: contains information about the adapter type and properties
 *	@reset: whether to reset the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
3663 int __devinit t3_prep_adapter(adapter_t *adapter,
3664 const struct adapter_info *ai, int reset)
3667 unsigned int i, j = 0;
3669 get_pci_mode(adapter, &adapter->params.pci);
3671 adapter->params.info = ai;
3672 adapter->params.nports = ai->nports0 + ai->nports1;
3673 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3674 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3675 adapter->params.linkpoll_period = 0;
3676 if (adapter->params.nports > 2)
3677 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3679 adapter->params.stats_update_period = is_10G(adapter) ?
3680 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3681 adapter->params.pci.vpd_cap_addr =
3682 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3684 ret = get_vpd_params(adapter, &adapter->params.vpd);
3688 if (reset && t3_reset_adapter(adapter))
3691 t3_sge_prep(adapter, &adapter->params.sge);
3693 if (adapter->params.vpd.mclk) {
3694 struct tp_params *p = &adapter->params.tp;
3696 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3697 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3698 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3700 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3701 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3702 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3703 p->cm_size = t3_mc7_size(&adapter->cm);
3704 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3705 p->chan_tx_size = p->pmtx_size / p->nchan;
3706 p->rx_pg_size = 64 * 1024;
3707 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3708 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3709 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3710 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3711 adapter->params.rev > 0 ? 12 : 6;
3712 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3714 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3717 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3718 t3_mc7_size(&adapter->pmtx) &&
3719 t3_mc7_size(&adapter->cm);
3721 if (is_offload(adapter)) {
3722 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3723 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3724 DEFAULT_NFILTERS : 0;
3725 adapter->params.mc5.nroutes = 0;
3726 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3728 #ifdef CONFIG_CHELSIO_T3_CORE
3729 init_mtus(adapter->params.mtus);
3730 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3734 early_hw_init(adapter, ai);
3736 if (adapter->params.nports > 2 &&
3737 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
3740 for_each_port(adapter, i) {
3742 struct port_info *p = adap2pinfo(adapter, i);
3744 while (!adapter->params.vpd.port_type[j])
3747 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3748 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3750 mac_prep(&p->mac, adapter, j);
3754 * The VPD EEPROM stores the base Ethernet address for the
3755 * card. A port's address is derived from the base by adding
3756 * the port's index to the base's low octet.
3758 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3759 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3761 t3_os_set_hw_addr(adapter, i, hw_addr);
3762 init_link_config(&p->link_config, p->port_type->caps);
3763 p->phy.ops->power_down(&p->phy, 1);
3764 if (!(p->port_type->caps & SUPPORTED_IRQ))
3765 adapter->params.linkpoll_period = 10;
3771 void t3_led_ready(adapter_t *adapter)
3773 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3777 void t3_port_failover(adapter_t *adapter, int port)
3781 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
3782 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3786 void t3_failover_done(adapter_t *adapter, int port)
3788 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3789 F_PORT0ACTIVE | F_PORT1ACTIVE);
3792 void t3_failover_clear(adapter_t *adapter)
3794 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3795 F_PORT0ACTIVE | F_PORT1ACTIVE);