2 * Copyright (c) 2012 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
/* Sleep for @x milliseconds via pause(9); hz is the kernel tick rate. */
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
/* !!(val & mask) collapses the masked field to 0/1 for the comparison. */
66 if (!!(val & mask) == polarity) {
79 * t4_set_reg_field - set a register field to a value
80 * @adapter: the adapter to program
81 * @addr: the register address
82 * @mask: specifies the portion of the register to modify
83 * @val: the new value for the register field
85 * Sets a register field specified by the supplied mask to the
88 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the field, then OR in the new value. */
91 u32 v = t4_read_reg(adapter, addr) & ~mask;
93 t4_write_reg(adapter, addr, v | val);
94 (void) t4_read_reg(adapter, addr); /* flush */
98 * t4_read_indirect - read indirectly addressed registers
100 * @addr_reg: register holding the indirect address
101 * @data_reg: register holding the value of the indirect register
102 * @vals: where the read register values are stored
103 * @nregs: how many indirect registers to read
104 * @start_idx: index of first indirect register to read
106 * Reads registers that are accessed indirectly through an address/data
109 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
110 unsigned int data_reg, u32 *vals, unsigned int nregs,
111 unsigned int start_idx)
/* Select the indirect register by index, then fetch its value. */
114 t4_write_reg(adap, addr_reg, start_idx);
115 *vals++ = t4_read_reg(adap, data_reg);
121 * t4_write_indirect - write indirectly addressed registers
123 * @addr_reg: register holding the indirect addresses
124 * @data_reg: register holding the value for the indirect registers
125 * @vals: values to write
126 * @nregs: how many indirect registers to write
127 * @start_idx: address of first indirect register to write
129 * Writes a sequential block of registers that are accessed indirectly
130 * through an address/data register pair.
132 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
133 unsigned int data_reg, const u32 *vals,
134 unsigned int nregs, unsigned int start_idx)
/* Select the next indirect register, then write its value. */
137 t4_write_reg(adap, addr_reg, start_idx++);
138 t4_write_reg(adap, data_reg, *vals++);
143 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
144 * mechanism. This guarantees that we get the real value even if we're
145 * operating within a Virtual Machine and the Hypervisor is trapping our
146 * Configuration Space accesses.
148 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
/* Issue the request for our own PF's config space, then read back the data. */
150 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
151 F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
153 return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
157 * t4_report_fw_error - report firmware error
160 * The adapter firmware can indicate error conditions to the host.
161 * This routine prints out the reason for the firmware error (as
162 * reported by the firmware).
164 static void t4_report_fw_error(struct adapter *adap)
/* Entry i of this table corresponds to PCIE_FW_EVAL code i. */
166 static const char *reason[] = {
167 "Crash", /* PCIE_FW_EVAL_CRASH */
168 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
169 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
170 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
171 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
172 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
173 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
174 "Reserved", /* reserved */
178 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
179 if (!(pcie_fw & F_PCIE_FW_ERR))
180 CH_ERR(adap, "Firmware error report called with no error\n");
182 CH_ERR(adap, "Firmware reports adapter error: %s\n",
183 reason[G_PCIE_FW_EVAL(pcie_fw)]);
187 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
189 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* A flit is 8 bytes; copy @nflit 64-bit words out of the mailbox. */
192 for ( ; nflit; nflit--, mbox_addr += 8)
193 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
197 * Handle a FW assertion reported in a mailbox.
199 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
201 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log its contents. */
203 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
204 CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
205 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
206 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
209 #define X_CIM_PF_NOACCESS 0xeeeeeeee
211 * t4_wr_mbox_meat - send a command to FW through the given mailbox
213 * @mbox: index of the mailbox to use
214 * @cmd: the command to write
215 * @size: command length in bytes
216 * @rpl: where to optionally store the reply
217 * @sleep_ok: if true we may sleep while awaiting command completion
219 * Sends the given command to FW through the selected mailbox and waits
220 * for the FW to execute the command. If @rpl is not %NULL it is used to
221 * store the FW's reply to the command. The command and its optional
222 * reply are of the same length. Some FW commands like RESET and
223 * INITIALIZE can take a considerable amount of time to execute.
224 * @sleep_ok determines whether we may sleep while awaiting the response.
225 * If sleeping is allowed we use progressive backoff otherwise we spin.
227 * The return value is 0 on success or a negative errno on failure. A
228 * failure can happen either because we are not able to execute the
229 * command or FW executes it but signals an error. In the latter case
230 * the return value is the error code indicated by FW (negated).
232 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
233 void *rpl, bool sleep_ok)
236 * We delay in small increments at first in an effort to maintain
237 * responsiveness for simple, fast executing commands but then back
238 * off to larger delays to a maximum retry delay.
240 static const int delay[] = {
241 1, 1, 3, 5, 10, 10, 20, 50, 100
246 int i, ms, delay_idx;
247 const __be64 *p = cmd;
248 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
249 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
/* Commands must be a multiple of 16 bytes and must fit in the mailbox. */
251 if ((size & 15) || size > MBOX_LEN)
254 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
255 for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
256 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
/* We must own the mailbox (X_MBOWNER_PL) before we can use it. */
258 if (v != X_MBOWNER_PL)
259 return v ? -EBUSY : -ETIMEDOUT;
261 for (i = 0; i < size; i += 8, p++)
262 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
/* Hand the mailbox over to the firmware. */
264 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
265 t4_read_reg(adap, ctl_reg); /* flush write */
270 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
272 ms = delay[delay_idx]; /* last element may repeat */
273 if (delay_idx < ARRAY_SIZE(delay) - 1)
279 v = t4_read_reg(adap, ctl_reg);
280 if (v == X_CIM_PF_NOACCESS)
282 if (G_MBOWNER(v) == X_MBOWNER_PL) {
283 if (!(v & F_MBMSGVALID)) {
284 t4_write_reg(adap, ctl_reg,
285 V_MBOWNER(X_MBOWNER_NONE));
/* First flit of the reply holds the FW command header. */
289 res = t4_read_reg64(adap, data_reg);
290 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
291 fw_asrt(adap, data_reg);
292 res = V_FW_CMD_RETVAL(EIO);
294 get_mbox_rpl(adap, rpl, size / 8, data_reg);
295 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
296 return -G_FW_CMD_RETVAL((int)res);
301 * We timed out waiting for a reply to our mailbox command. Report
302 * the error and also check to see if the firmware reported any
305 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
306 *(const u8 *)cmd, mbox);
307 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
308 t4_report_fw_error(adap);
313 * t4_mc_read - read from MC through backdoor accesses
315 * @addr: address of first byte requested
316 * @data: 64 bytes of data containing the requested address
317 * @ecc: where to store the corresponding 64-bit ECC word
319 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
320 * that covers the requested address @addr. If @parity is not %NULL it
321 * is assigned the 64-bit ECC word for the read data.
323 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
/* Bail if a BIST operation is already in flight. */
327 if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
/* Program a 64-byte-aligned, 64-byte-long BIST read. */
329 t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
330 t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
331 t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
332 t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
334 i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
338 #define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
/* Data words come back in reverse order; word 16 holds the ECC. */
340 for (i = 15; i >= 0; i--)
341 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
343 *ecc = t4_read_reg64(adap, MC_DATA(16));
349 * t4_edc_read - read from EDC through backdoor accesses
351 * @idx: which EDC to access
352 * @addr: address of first byte requested
353 * @data: 64 bytes of data containing the requested address
354 * @ecc: where to store the corresponding 64-bit ECC word
356 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
357 * that covers the requested address @addr. If @parity is not %NULL it
358 * is assigned the 64-bit ECC word for the read data.
360 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
/*
 * NOTE(review): @idx is added directly to register addresses below, so it
 * is presumably pre-scaled to the per-EDC register stride on a line not
 * visible here -- confirm against the full source.
 */
365 if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
/* Program a 64-byte-aligned, 64-byte-long BIST read. */
367 t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
368 t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
369 t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
370 t4_write_reg(adap, A_EDC_BIST_CMD + idx,
371 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
372 i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
376 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
/* Data words come back in reverse order; word 16 holds the ECC. */
378 for (i = 15; i >= 0; i--)
379 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
381 *ecc = t4_read_reg64(adap, EDC_DATA(16));
387 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
389 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
390 * @addr: address within indicated memory type
391 * @len: amount of memory to read
392 * @buf: host memory buffer
394 * Reads an [almost] arbitrary memory region in the firmware: the
395 * firmware memory address, length and host buffer must be aligned on
396 * 32-bit boundaries. The memory is returned as a raw byte sequence from
397 * the firmware's memory. If this memory contains data structures which
398 * contain multi-byte integers, it's the caller's responsibility to
399 * perform appropriate byte order conversions.
401 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
404 u32 pos, start, end, offset;
408 * Argument sanity checks ...
410 if ((addr & 0x3) || (len & 0x3))
414 * The underlying EDC/MC read routines read 64 bytes at a time so we
415 * need to round down the start and round up the end. We'll start
416 * copying out of the first line at (addr - start) a word at a time.
418 start = addr & ~(64-1);
419 end = (addr + len + 64-1) & ~(64-1);
420 offset = (addr - start)/sizeof(__be32);
422 for (pos = start; pos < end; pos += 64, offset = 0) {
426 * Read the chip's memory block and bail if there's an error.
/* MC goes through the MC backdoor, EDC0/EDC1 through the EDC one. */
429 ret = t4_mc_read(adap, pos, data, NULL);
431 ret = t4_edc_read(adap, mtype, pos, data, NULL);
436 * Copy the data into the caller's memory buffer.
/* Each 64-byte line is 16 32-bit words; stop once @len is exhausted. */
438 while (offset < 16 && len > 0) {
439 *buf++ = data[offset++];
440 len -= sizeof(__be32);
448 * Partial EEPROM Vital Product Data structure. Includes only the ID and
460 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
462 #define EEPROM_MAX_RD_POLL 40 /* max polls for a VPD read to complete */
463 #define EEPROM_MAX_WR_POLL 6 /* max polls for a VPD write to complete */
464 #define EEPROM_STAT_ADDR 0x7bfc /* EEPROM status word (write protection) */
465 #define VPD_BASE 0x400 /* VPD start offset on current cards */
466 #define VPD_BASE_OLD 0 /* VPD start offset on early cards */
468 #define VPD_INFO_FLD_HDR_SIZE 3 /* 2-byte keyword + 1-byte length */
471 * t4_seeprom_read - read a serial EEPROM location
472 * @adapter: adapter to read
473 * @addr: EEPROM virtual address
474 * @data: where to store the read data
476 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
477 * VPD capability. Note that this function must be called with a virtual
480 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
483 int attempts = EEPROM_MAX_RD_POLL;
484 unsigned int base = adapter->params.pci.vpd_cap_addr;
486 if (addr >= EEPROMVSIZE || (addr & 3))
/* Writing the address with PCI_VPD_ADDR_F clear starts a read. */
489 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
/* Poll until the hardware sets the flag to signal completion. */
492 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
493 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
495 if (!(val & PCI_VPD_ADDR_F)) {
496 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
499 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
500 *data = le32_to_cpu(*data);
505 * t4_seeprom_write - write a serial EEPROM location
506 * @adapter: adapter to write
507 * @addr: virtual EEPROM address
508 * @data: value to write
510 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
511 * VPD capability. Note that this function must be called with a virtual
514 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
517 int attempts = EEPROM_MAX_WR_POLL;
518 unsigned int base = adapter->params.pci.vpd_cap_addr;
520 if (addr >= EEPROMVSIZE || (addr & 3))
523 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
/* Setting PCI_VPD_ADDR_F along with the address starts a write. */
525 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
526 (u16)addr | PCI_VPD_ADDR_F);
/* The hardware clears the flag when the write has completed. */
529 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
530 } while ((val & PCI_VPD_ADDR_F) && --attempts);
532 if (val & PCI_VPD_ADDR_F) {
533 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
540 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
541 * @phys_addr: the physical EEPROM address
542 * @fn: the PCI function number
543 * @sz: size of function-specific area
545 * Translate a physical EEPROM address to virtual. The first 1K is
546 * accessed through virtual addresses starting at 31K, the rest is
547 * accessed through virtual addresses starting at 0.
549 * The mapping is as follows:
550 * [0..1K) -> [31K..32K)
551 * [1K..1K+A) -> [ES-A..ES)
552 * [1K+A..ES) -> [0..ES-A-1K)
554 * where A = @fn * @sz, and ES = EEPROM size.
556 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/*
 * NOTE(review): the comparisons below use @fn directly as A, which only
 * matches the mapping above if fn has already been scaled by @sz
 * (A = fn * sz) on a line not visible here -- confirm against the full
 * source.
 */
559 if (phys_addr < 1024)
560 return phys_addr + (31 << 10);
561 if (phys_addr < 1024 + fn)
562 return EEPROMSIZE - fn + phys_addr - 1024;
563 if (phys_addr < EEPROMSIZE)
564 return phys_addr - 1024 - fn;
569 * t4_seeprom_wp - enable/disable EEPROM write protection
570 * @adapter: the adapter
571 * @enable: whether to enable or disable write protection
573 * Enables or disables write protection on the serial EEPROM.
575 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* Write 0xc to the status word to protect, 0 to unprotect. */
577 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
581 * get_vpd_keyword_val - Locates an information field keyword in the VPD
582 * @v: Pointer to buffered vpd data structure
583 * @kw: The keyword to search for
585 * Returns the value of the information field keyword or
588 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
591 unsigned int offset , len;
592 const u8 *buf = &v->id_tag;
593 const u8 *vpdr_len = &v->vpdr_tag;
594 offset = sizeof(struct t4_vpd_hdr);
/* VPD-R length is little-endian in the two bytes after the tag. */
595 len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
597 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
/* Walk 3-byte field headers: 2-byte keyword followed by 1-byte length. */
601 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
602 if(memcmp(buf + i , kw , 2) == 0){
603 i += VPD_INFO_FLD_HDR_SIZE;
/* Not a match: skip this field's header and its payload. */
607 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
615 * get_vpd_params - read VPD parameters from VPD EEPROM
616 * @adapter: adapter to read
617 * @p: where to store the parameters
619 * Reads card parameters stored in VPD EEPROM.
621 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
625 u8 vpd[VPD_LEN], csum;
626 const struct t4_vpd_hdr *v;
629 * Card information normally starts at VPD_BASE but early cards had
/* 0x82 is the VPD ID-string tag; its presence selects the new layout. */
632 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
633 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
635 for (i = 0; i < sizeof(vpd); i += 4) {
636 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
640 v = (const struct t4_vpd_hdr *)vpd;
/* Look up keyword @name in the VPD; fail the probe if it is missing. */
642 #define FIND_VPD_KW(var,name) do { \
643 var = get_vpd_keyword_val(v , name); \
645 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* The RV byte makes the covered region sum to zero when intact. */
650 FIND_VPD_KW(i, "RV");
651 for (csum = 0; i >= 0; i--)
655 CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
658 FIND_VPD_KW(ec, "EC");
659 FIND_VPD_KW(sn, "SN");
660 FIND_VPD_KW(pn, "PN");
661 FIND_VPD_KW(na, "NA");
664 memcpy(p->id, v->id_data, ID_LEN);
666 memcpy(p->ec, vpd + ec, EC_LEN);
/* Field length is the header byte just before the value. */
668 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
669 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
671 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
672 strstrip((char *)p->pn);
673 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
674 strstrip((char *)p->na);
679 /* serial flash and firmware constants and flash config file constants */
681 SF_ATTEMPTS = 10, /* max retries for SF operations */
/* Standard SPI NOR flash command opcodes. */
683 /* flash command opcodes */
684 SF_PROG_PAGE = 2, /* program page */
685 SF_WR_DISABLE = 4, /* disable writes */
686 SF_RD_STATUS = 5, /* read status register */
687 SF_WR_ENABLE = 6, /* enable writes */
688 SF_RD_DATA_FAST = 0xb, /* read flash */
689 SF_RD_ID = 0x9f, /* read ID */
690 SF_ERASE_SECTOR = 0xd8, /* erase sector */
694 * sf1_read - read data from the serial flash
695 * @adapter: the adapter
696 * @byte_cnt: number of bytes to read
697 * @cont: whether another operation will be chained
698 * @lock: whether to lock SF for PL access only
699 * @valp: where to store the read data
701 * Reads up to 4 bytes of data from the serial flash. The location of
702 * the read needs to be specified prior to calling this by issuing the
703 * appropriate commands to the serial flash.
705 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
710 if (!byte_cnt || byte_cnt > 4)
712 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* BYTECNT is encoded as count - 1 in the SF_OP register. */
714 t4_write_reg(adapter, A_SF_OP,
715 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
716 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
718 *valp = t4_read_reg(adapter, A_SF_DATA);
723 * sf1_write - write data to the serial flash
724 * @adapter: the adapter
725 * @byte_cnt: number of bytes to write
726 * @cont: whether another operation will be chained
727 * @lock: whether to lock SF for PL access only
728 * @val: value to write
730 * Writes up to 4 bytes of data to the serial flash. The location of
731 * the write needs to be specified prior to calling this by issuing the
732 * appropriate commands to the serial flash.
734 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
737 if (!byte_cnt || byte_cnt > 4)
739 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
741 t4_write_reg(adapter, A_SF_DATA, val);
/* V_OP(1) selects a write; BYTECNT is encoded as count - 1. */
742 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
743 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
744 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
748 * flash_wait_op - wait for a flash operation to complete
749 * @adapter: the adapter
750 * @attempts: max number of polls of the status register
751 * @delay: delay between polls in ms
753 * Wait for a flash operation to complete by polling the status register.
755 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RDSR (read status register), then fetch the status byte. */
761 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
762 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
774 * t4_read_flash - read words from serial flash
775 * @adapter: the adapter
776 * @addr: the start address for the read
777 * @nwords: how many 32-bit words to read
778 * @data: where to store the read data
779 * @byte_oriented: whether to store data as bytes or as words
781 * Read the specified number of 32-bit words from the serial flash.
782 * If @byte_oriented is set the read data is stored as a byte array
783 * (i.e., big-endian), otherwise as 32-bit words in the platform's
786 int t4_read_flash(struct adapter *adapter, unsigned int addr,
787 unsigned int nwords, u32 *data, int byte_oriented)
791 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/*
 * Build the flash command word: opcode in the low byte, the 24-bit
 * address in big-endian order in the upper bytes (hence the swab32).
 */
794 addr = swab32(addr) | SF_RD_DATA_FAST;
796 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
797 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Chain the reads; the last one (nwords == 1) releases the SF lock. */
800 for ( ; nwords; nwords--, data++) {
801 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
803 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
807 *data = htonl(*data);
813 * t4_write_flash - write up to a page of data to the serial flash
814 * @adapter: the adapter
815 * @addr: the start address to write
816 * @n: length of data to write in bytes
817 * @data: the data to write
818 * @byte_oriented: whether to store data as bytes or as words
820 * Writes up to a page of data (256 bytes) to the serial flash starting
821 * at the given address. All the data must be written to the same page.
822 * If @byte_oriented is set the write data is stored as byte stream
823 * (i.e. matches what on disk), otherwise in big-endian.
825 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
826 unsigned int n, const u8 *data, int byte_oriented)
829 u32 buf[SF_PAGE_SIZE / 4];
830 unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must not cross a 256-byte flash page boundary. */
832 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
835 val = swab32(addr) | SF_PROG_PAGE;
837 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
838 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
841 for (left = n; left; left -= c) {
/* Pack up to 4 bytes big-endian into one SF_DATA word. */
843 for (val = 0, i = 0; i < c; ++i)
844 val = (val << 8) + *data++;
849 ret = sf1_write(adapter, c, c != left, 1, val);
853 ret = flash_wait_op(adapter, 8, 1);
857 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
859 /* Read the page to verify the write succeeded */
860 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data was advanced by the loop above, so rewind by n for the compare. */
865 if (memcmp(data - n, (u8 *)buf + offset, n)) {
866 CH_ERR(adapter, "failed to correctly write the flash page "
873 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
878 * t4_get_fw_version - read the firmware version
879 * @adapter: the adapter
880 * @vers: where to place the version
882 * Reads the FW version from flash.
884 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* The version lives in the fw_hdr at the start of the FW flash region. */
886 return t4_read_flash(adapter,
887 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
892 * t4_get_tp_version - read the TP microcode version
893 * @adapter: the adapter
894 * @vers: where to place the version
896 * Reads the TP microcode version from flash.
898 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* The TP microcode version is also stored in the fw_hdr in flash. */
900 return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
906 * t4_check_fw_version - check if the FW is compatible with this driver
907 * @adapter: the adapter
909 * Checks if an adapter's FW is compatible with the driver. Returns 0
910 * if there's exact match, a negative error if the version could not be
911 * read or there's a major version mismatch, and a positive value if the
912 * expected major version is found but there's a minor version mismatch.
914 int t4_check_fw_version(struct adapter *adapter)
916 int ret, major, minor, micro;
/* Side effect: caches both FW and TP versions in adapter->params. */
918 ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
920 ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
924 major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
925 minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
926 micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
928 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
929 CH_ERR(adapter, "card FW has major version %u, driver wants "
930 "%u\n", major, FW_VERSION_MAJOR);
934 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
935 return 0; /* perfect match */
937 /* Minor/micro version mismatch. Report it but often it's OK. */
942 * t4_flash_erase_sectors - erase a range of flash sectors
943 * @adapter: the adapter
944 * @start: the first sector to erase
945 * @end: the last sector to erase
947 * Erases the sectors in the given inclusive range.
949 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
953 while (start <= end) {
/* WREN first, then sector-erase with the sector number as the address. */
954 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
955 (ret = sf1_write(adapter, 4, 0, 1,
956 SF_ERASE_SECTOR | (start << 8))) != 0 ||
957 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
958 CH_ERR(adapter, "erase of flash sector %d failed, "
959 "error %d\n", start, ret);
964 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
969 * t4_flash_cfg_addr - return the address of the flash configuration file
970 * @adapter: the adapter
972 * Return the address within the flash where the Firmware Configuration
975 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
/* A 1MB serial flash uses the FPGA flash layout. */
977 if (adapter->params.sf_size == 0x100000)
978 return FLASH_FPGA_CFG_START;
980 return FLASH_CFG_START;
984 * t4_load_cfg - download config file
986 * @cfg_data: the cfg text file to write
987 * @size: text file size
989 * Write the supplied config text file to the card's serial flash.
991 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
995 unsigned int flash_cfg_start_sec;
996 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
998 addr = t4_flash_cfg_addr(adap);
999 flash_cfg_start_sec = addr / SF_SEC_SIZE;
1001 if (size > FLASH_CFG_MAX_SIZE) {
1002 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
1003 FLASH_CFG_MAX_SIZE);
/* Always erase the whole config region before (re)writing it. */
1007 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
1009 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
1010 flash_cfg_start_sec + i - 1);
1012 * If size == 0 then we're simply erasing the FLASH sectors associated
1013 * with the on-adapter Firmware Configuration File.
1015 if (ret || size == 0)
1018 /* this will write to the flash up to SF_PAGE_SIZE at a time */
1019 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
/* Last chunk may be shorter than a full page. */
1020 if ( (size - i) < SF_PAGE_SIZE)
1024 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
1028 addr += SF_PAGE_SIZE;
1029 cfg_data += SF_PAGE_SIZE;
1034 CH_ERR(adap, "config file %s failed %d\n",
1035 (size == 0 ? "clear" : "download"), ret);
1041 * t4_load_fw - download firmware
1042 * @adap: the adapter
1043 * @fw_data: the firmware image to write
1046 * Write the supplied firmware image to the card's serial flash.
1048 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1053 u8 first_page[SF_PAGE_SIZE];
1054 const u32 *p = (const u32 *)fw_data;
1055 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1056 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1059 CH_ERR(adap, "FW image has no data\n");
1063 CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1066 if (ntohs(hdr->len512) * 512 != size) {
1067 CH_ERR(adap, "FW image size differs from size in FW header\n");
1070 if (size > FLASH_FW_MAX_SIZE) {
1071 CH_ERR(adap, "FW image too large, max is %u bytes\n",
/* A valid image's 32-bit word sum is 0xffffffff. */
1076 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1077 csum += ntohl(p[i]);
1079 if (csum != 0xffffffff) {
1080 CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1085 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1086 ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1087 FLASH_FW_START_SEC + i - 1);
1092 * We write the correct version at the end so the driver can see a bad
1093 * version if the FW write fails. Start by writing a copy of the
1094 * first page with a bad version.
1096 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1097 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1098 ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
1102 addr = FLASH_FW_START;
1103 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1104 addr += SF_PAGE_SIZE;
1105 fw_data += SF_PAGE_SIZE;
1106 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* Finally write the real version, marking the image as good. */
1111 ret = t4_write_flash(adap,
1112 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1113 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1116 CH_ERR(adap, "firmware download failed, error %d\n", ret);
1120 /* BIOS boot headers */
1121 typedef struct pci_expansion_rom_header {
1122 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1123 u8 reserved[22]; /* Reserved per processor Architecture data */
1124 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1125 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1127 /* Legacy PCI Expansion ROM Header */
1128 typedef struct legacy_pci_expansion_rom_header {
1129 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1130 u8 size512; /* Current Image Size in units of 512 bytes */
1131 u8 initentry_point[4]; /* Initialization entry point */
1132 u8 cksum; /* Checksum computed on the entire Image */
1133 u8 reserved[16]; /* Reserved */
1134 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1135 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1137 /* EFI PCI Expansion ROM Header */
1138 typedef struct efi_pci_expansion_rom_header {
1139 u8 signature[2]; /* ROM signature. The value 0xaa55 */
1140 u8 initialization_size[2]; /* Units 512. Includes this header */
1141 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1142 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
1143 u8 efi_machine_type[2]; /* Machine type from EFI image header */
1144 u8 compression_type[2]; /* Compression type. */
/*
1146 * Compression type definition
1149 * 0x2-0xFFFF: Reserved
 */
1151 u8 reserved[8]; /* Reserved */
1152 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
1153 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1154 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1156 /* PCI Data Structure Format */
1157 typedef struct pcir_data_structure { /* PCI Data Structure */
1158 u8 signature[4]; /* Signature. The string "PCIR" */
1159 u8 vendor_id[2]; /* Vendor Identification */
1160 u8 device_id[2]; /* Device Identification */
1161 u8 vital_product[2]; /* Pointer to Vital Product Data */
1162 u8 length[2]; /* PCIR Data Structure Length */
1163 u8 revision; /* PCIR Data Structure Revision */
1164 u8 class_code[3]; /* Class Code */
1165 u8 image_length[2]; /* Image Length. Multiple of 512B */
1166 u8 code_revision[2]; /* Revision Level of Code/Data */
1167 u8 code_type; /* Code Type. */
1169 * PCI Expansion ROM Code Types
1170 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1171 * 0x01: Open Firmware standard for PCI. FCODE
1172 * 0x02: Hewlett-Packard PA RISC. HP reserved
1173 * 0x03: EFI Image. EFI
1174 * 0x04-0xFF: Reserved.
1176 u8 indicator; /* Indicator. Identifies the last image in the ROM */
1177 u8 reserved[2]; /* Reserved */
1178 } pcir_data_t; /* PCI__DATA_STRUCTURE */
1180 /* BOOT constants */
1182 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1183 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
1184 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
1185 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1186 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 * 512B = 512KB max */
1187 VENDOR_ID = 0x1425, /* Vendor ID */
1188 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1192 * modify_device_id - Modifies the device ID of the Boot BIOS image
1193 * @device_id: the device ID to write.
1194 * @boot_data: the boot image to modify.
1196 * Write the supplied device ID to the boot BIOS image.
1198 static void modify_device_id(int device_id, u8 *boot_data)
1200 legacy_pci_exp_rom_header_t *header;
1201 pcir_data_t *pcir_header;
1205 * Loop through all chained images and change the device ID's
1208 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1209 pcir_header = (pcir_data_t *) &boot_data[cur_header +
1210 le16_to_cpu(*(u16*)header->pcir_offset)];
1213 * Only modify the Device ID if code type is Legacy or HP.
1214 * 0x00: Okay to modify
1215 * 0x01: FCODE. Do not modify
1216 * 0x03: Okay to modify
1217 * 0x04-0xFF: Do not modify
1219 if (pcir_header->code_type == 0x00) {
1224 * Modify Device ID to match current adapter
1226 *(u16*) pcir_header->device_id = device_id;
1229 * Set checksum temporarily to 0.
1230 * We will recalculate it later.
1232 header->cksum = 0x0;
1235 * Calculate and update checksum (sum of all image bytes,
1236 * over size512 * 512 bytes)
1237 for (i = 0; i < (header->size512 * 512); i++)
1238 csum += (u8)boot_data[cur_header + i];
1241 * Invert summed value to create the checksum
1242 * Writing new checksum value directly to the boot data
1243 * (offset 7 is the cksum field of the legacy header)
1244 boot_data[cur_header + 7] = -csum;
1246 } else if (pcir_header->code_type == 0x03) {
1249 * Modify Device ID to match current adapter
1250 * (EFI images carry no 8-bit header checksum to fix up)
1251 *(u16*) pcir_header->device_id = device_id;
1257 * Check indicator element to identify if this is the last
1258 * image in the ROM (bit 7 of the PCIR indicator byte)
1260 if (pcir_header->indicator & 0x80)
1264 * Move header pointer up to the next image in the ROM.
1266 cur_header += header->size512 * 512;
1271 * t4_load_boot - download boot flash
1272 * @adapter: the adapter
1273 * @boot_data: the boot image to write
1274 * @boot_addr: offset in flash to write boot_data (in KB units; scaled by 1024 below)
1277 * Write the supplied boot image to the card's serial flash.
1278 * The boot image has the following sections: a 28-byte header and the
1281 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1282 unsigned int boot_addr, unsigned int size)
1284 pci_exp_rom_header_t *header;
1286 pcir_data_t *pcir_header;
1290 unsigned int boot_sector = boot_addr * 1024;
1291 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1294 * Make sure the boot image does not encroach on the firmware region
1296 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1297 CH_ERR(adap, "boot image encroaching on firmware region\n");
1302 * Number of sectors spanned
1304 i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1306 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1307 (boot_sector >> 16) + i - 1);
1310 * If size == 0 then we're simply erasing the FLASH sectors associated
1311 * with the on-adapter option ROM file
1313 if (ret || (size == 0))
1316 /* Get boot header */
1317 header = (pci_exp_rom_header_t *)boot_data;
1318 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1319 /* PCIR Data Structure */
1320 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1323 * Perform some primitive sanity testing to avoid accidentally
1324 * writing garbage over the boot sectors. We ought to check for
1325 * more but it's not worth it for now ...
1327 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1328 CH_ERR(adap, "boot image too small/large\n");
1333 * Check BOOT ROM header signature (0xaa55)
1335 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1336 CH_ERR(adap, "Boot image missing signature\n");
1341 * Check PCI header signature ("PCIR")
1343 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1344 CH_ERR(adap, "PCI header missing signature\n");
1349 * Check Vendor ID matches Chelsio ID
1351 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1352 CH_ERR(adap, "Vendor ID missing signature\n");
1357 * Retrieve adapter's device ID
1359 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1360 /* Want to deal with PF 0 so I strip off PF 4 indicator */
1361 device_id = (device_id & 0xff) | 0x4000;
1364 * Check PCIE Device ID
1366 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1368 * Change the device ID in the Boot BIOS image to match
1369 * the Device ID of the current adapter.
1371 modify_device_id(device_id, boot_data);
1375 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1376 * we finish copying the rest of the boot image. This will ensure
1377 * that the BIOS boot header will only be written if the boot image
1378 * was written in full.
1381 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1382 addr += SF_PAGE_SIZE;
1383 boot_data += SF_PAGE_SIZE;
1384 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1389 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1393 CH_ERR(adap, "boot image download failed, error %d\n", ret);
1398 * t4_read_cimq_cfg - read CIM queue configuration
1399 * @adap: the adapter
1400 * @base: holds the queue base addresses in bytes
1401 * @size: holds the queue sizes in bytes
1402 * @thres: holds the queue full thresholds in bytes
1404 * Returns the current configuration of the CIM queues, starting with
1405 * the IBQs, then the OBQs.
1407 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1411 for (i = 0; i < CIM_NUM_IBQ; i++) {
1412 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1414 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1415 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1416 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1417 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
1419 for (i = 0; i < CIM_NUM_OBQ; i++) {
1420 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1422 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1423 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1424 *size++ = G_CIMQSIZE(v) * 256; /* no full-threshold for OBQs */
1429 * t4_read_cim_ibq - read the contents of a CIM inbound queue
1430 * @adap: the adapter
1431 * @qid: the queue index (0..5)
1432 * @data: where to store the queue contents
1433 * @n: capacity of @data in 32-bit words
1435 * Reads the contents of the selected CIM queue starting at address 0 up
1436 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1437 * error and the number of 32-bit words actually read on success.
1439 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1443 const unsigned int nwords = CIM_IBQ_SIZE * 4;
1445 if (qid > 5 || (n & 3))
1448 addr = qid * nwords; /* each IBQ occupies nwords of debug address space */
1452 for (i = 0; i < n; i++, addr++) {
1453 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1455 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1459 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1461 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0); /* disable debug access */
1466 * t4_read_cim_obq - read the contents of a CIM outbound queue
1467 * @adap: the adapter
1468 * @qid: the queue index (0..5)
1469 * @data: where to store the queue contents
1470 * @n: capacity of @data in 32-bit words
1472 * Reads the contents of the selected CIM queue starting at address 0 up
1473 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1474 * error and the number of 32-bit words actually read on success.
1476 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1479 unsigned int addr, v, nwords;
1481 if (qid > 5 || (n & 3))
1484 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1485 V_QUENUMSELECT(qid));
1486 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1488 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
1489 nwords = G_CIMQSIZE(v) * 64; /* same */
1493 for (i = 0; i < n; i++, addr++) {
1494 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1496 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1500 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1502 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0); /* disable debug access */
1508 CIM_CTL_BASE = 0x2000,
1509 CIM_PBT_ADDR_BASE = 0x2800,
1510 CIM_PBT_LRF_BASE = 0x3000,
1511 CIM_PBT_DATA_BASE = 0x3800
1515 * t4_cim_read - read a block from CIM internal address space
1516 * @adap: the adapter
1517 * @addr: the start address within the CIM address space
1518 * @n: number of words to read
1519 * @valp: where to store the result
1521 * Reads a block of 4-byte words from the CIM internal address space.
1523 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1528 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1531 for ( ; !ret && n--; addr += 4) {
1532 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1533 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1536 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1542 * t4_cim_write - write a block into CIM internal address space
1543 * @adap: the adapter
1544 * @addr: the start address within the CIM address space
1545 * @n: number of words to write
1546 * @valp: set of values to write
1548 * Writes a block of 4-byte words into the CIM internal address space.
1550 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1551 const unsigned int *valp)
1555 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1558 for ( ; !ret && n--; addr += 4) {
1559 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1560 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1561 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single 4-byte word @val at @addr in CIM space. */
1567 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1569 return t4_cim_write(adap, addr, 1, &val);
1573 * t4_cim_ctl_read - read a block from CIM control region
1574 * @adap: the adapter
1575 * @addr: the start address within the CIM control region
1576 * @n: number of words to read
1577 * @valp: where to store the result
1579 * Reads a block of 4-byte words from the CIM control region
1580 * (CIM address space offset by CIM_CTL_BASE).
1581 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1584 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1588 * t4_cim_read_la - read CIM LA capture buffer
1589 * @adap: the adapter
1590 * @la_buf: where to store the LA data
1591 * @wrptr: the HW write pointer within the capture buffer
1593 * Reads the contents of the CIM LA buffer with the most recent entry at
1594 * the end of the returned data and with the entry at @wrptr first.
1595 * We try to leave the LA in the running state we find it in.
1597 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1600 unsigned int cfg, val, idx;
1602 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1606 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
1607 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1612 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1616 idx = G_UPDBGLAWRPTR(val); /* start reading at the HW write pointer */
1620 for (i = 0; i < adap->params.cim_la_size; i++) {
1621 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1622 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1625 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1628 if (val & F_UPDBGLARDEN) { /* read-enable still set: HW didn't respond */
1632 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1635 idx = (idx + 1) & M_UPDBGLARDPTR; /* wrap around the circular buffer */
1638 if (cfg & F_UPDBGLAEN) { /* restore the running state we found */
1639 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1640 cfg & ~F_UPDBGLARDEN);
/*
 * Read the CIM PIF logic-analyzer request and response capture buffers.
 * Freezes the LA (if running), snapshots the request/response write
 * pointers, reads CIM_PIFLA_SIZE groups of entries, then restores the
 * original debug configuration.
 */
1647 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1648 unsigned int *pif_req_wrptr,
1649 unsigned int *pif_rsp_wrptr)
1652 u32 cfg, val, req, rsp;
1654 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1655 if (cfg & F_LADBGEN)
1656 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); /* freeze LA */
1658 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1659 req = G_POLADBGWRPTR(val);
1660 rsp = G_PILADBGWRPTR(val);
1662 *pif_req_wrptr = req;
1664 *pif_rsp_wrptr = rsp;
1666 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1667 for (j = 0; j < 6; j++) {
1668 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1669 V_PILADBGRDPTR(rsp));
1670 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1671 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1675 req = (req + 2) & M_POLADBGRDPTR;
1676 rsp = (rsp + 2) & M_PILADBGRDPTR;
1678 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); /* restore original config */
/*
 * Read the CIM MA logic-analyzer request and response capture buffers.
 * Freezes the LA (if running), reads CIM_MALA_SIZE groups of entries,
 * then restores the original debug configuration.
 */
1681 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1686 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1687 if (cfg & F_LADBGEN)
1688 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); /* freeze LA */
1690 for (i = 0; i < CIM_MALA_SIZE; i++) {
1691 for (j = 0; j < 5; j++) {
1693 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1694 V_PILADBGRDPTR(idx));
1695 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1696 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1699 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); /* restore original config */
1703 * t4_tp_read_la - read TP LA capture buffer
1704 * @adap: the adapter
1705 * @la_buf: where to store the LA data (TPLA_SIZE 64-bit entries)
1706 * @wrptr: the HW write pointer within the capture buffer
1708 * Reads the contents of the TP LA buffer with the most recent entry at
1709 * the end of the returned data and with the entry at @wrptr first.
1710 * We leave the LA in the running state we find it in.
1712 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1714 bool last_incomplete;
1715 unsigned int i, cfg, val, idx;
1717 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1718 if (cfg & F_DBGLAENABLE) /* freeze LA */
1719 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1720 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1722 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1723 idx = G_DBGLAWPTR(val);
1724 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1725 if (last_incomplete) /* skip the half-written entry at the write pointer */
1726 idx = (idx + 1) & M_DBGLARPTR;
1731 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1732 val |= adap->params.tp.la_mask;
1734 for (i = 0; i < TPLA_SIZE; i++) {
1735 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1736 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1737 idx = (idx + 1) & M_DBGLARPTR;
1740 /* Wipe out last entry if it isn't valid */
1741 if (last_incomplete)
1742 la_buf[TPLA_SIZE - 1] = ~0ULL;
1744 if (cfg & F_DBGLAENABLE) /* restore running state */
1745 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1746 cfg | adap->params.tp.la_mask);
/*
 * Read the ULP RX LA capture buffer: 8 interleaved streams of
 * ULPRX_LA_SIZE words each, stored column-wise into @la_buf
 * (stream i occupies la_buf[i], la_buf[i+8], la_buf[i+16], ...).
 */
1749 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1753 for (i = 0; i < 8; i++) {
1754 u32 *p = la_buf + i;
1756 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); /* select stream i */
1757 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1758 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); /* start at HW write ptr */
1759 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1760 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1764 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1765 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1768 * t4_link_start - apply link configuration to MAC/PHY
1769 * @adap: the adapter
1770 * @mbox: mailbox to use for the FW command
1771 * @port: the port id
1772 * @lc: the requested link configuration
1773 * Set up a port's MAC and PHY according to a desired link configuration.
1774 * - If the PHY can auto-negotiate first decide what to advertise, then
1775 * enable/disable auto-negotiation as desired, and reset.
1776 * - If the PHY does not auto-negotiate just reset it.
1777 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1778 * otherwise do it later based on the outcome of auto-negotiation.
1780 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1781 struct link_config *lc)
1783 struct fw_port_cmd c;
1784 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1787 if (lc->requested_fc & PAUSE_RX)
1788 fc |= FW_PORT_CAP_FC_RX;
1789 if (lc->requested_fc & PAUSE_TX)
1790 fc |= FW_PORT_CAP_FC_TX;
1792 memset(&c, 0, sizeof(c));
1793 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1794 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1795 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1798 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1799 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1800 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1801 } else if (lc->autoneg == AUTONEG_DISABLE) {
1802 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1803 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1805 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1807 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1811 * t4_restart_aneg - restart autonegotiation
1812 * @adap: the adapter
1813 * @mbox: mbox to use for the FW command
1814 * @port: the port id
1816 * Restarts autonegotiation for the selected port.
1818 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1820 struct fw_port_cmd c;
1822 memset(&c, 0, sizeof(c));
1823 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1824 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1825 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1827 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); /* advertise ANEG only */
1828 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1832 unsigned int mask; /* bits to check in interrupt status */
1833 const char *msg; /* message to print or NULL */
1834 short stat_idx; /* stat counter to increment or -1 */
1835 unsigned short fatal; /* whether the condition reported is fatal */
1839 * t4_handle_intr_status - table driven interrupt handler
1840 * @adapter: the adapter that generated the interrupt
1841 * @reg: the interrupt status register to process
1842 * @acts: table of interrupt actions
1844 * A table driven interrupt handler that applies a set of masks to an
1845 * interrupt status word and performs the corresponding actions if the
1846 * interrupts described by the mask have occurred. The actions include
1847 * optionally emitting a warning or alert message. The table is terminated
1848 * by an entry specifying mask 0. Returns the number of fatal interrupt
1851 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1852 const struct intr_info *acts)
1855 unsigned int mask = 0;
1856 unsigned int status = t4_read_reg(adapter, reg);
1858 for ( ; acts->mask; ++acts) {
1859 if (!(status & acts->mask))
1863 CH_ALERT(adapter, "%s (0x%x)\n",
1864 acts->msg, status & acts->mask);
1865 } else if (acts->msg)
1866 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1867 acts->msg, status & acts->mask);
1871 if (status) /* clear processed interrupts */
1872 t4_write_reg(adapter, reg, status);
1877 * Interrupt handler for the PCIE module. All listed conditions are
1878 * parity/TLP faults and are treated as fatal.
1879 static void pcie_intr_handler(struct adapter *adapter)
1881 static struct intr_info sysbus_intr_info[] = {
1882 { F_RNPP, "RXNP array parity error", -1, 1 },
1883 { F_RPCP, "RXPC array parity error", -1, 1 },
1884 { F_RCIP, "RXCIF array parity error", -1, 1 },
1885 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1886 { F_RFTP, "RXFT array parity error", -1, 1 },
1889 static struct intr_info pcie_port_intr_info[] = {
1890 { F_TPCP, "TXPC array parity error", -1, 1 },
1891 { F_TNPP, "TXNP array parity error", -1, 1 },
1892 { F_TFTP, "TXFT array parity error", -1, 1 },
1893 { F_TCAP, "TXCA array parity error", -1, 1 },
1894 { F_TCIP, "TXCIF array parity error", -1, 1 },
1895 { F_RCAP, "RXCA array parity error", -1, 1 },
1896 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1897 { F_RDPE, "Rx data parity error", -1, 1 },
1898 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
1901 static struct intr_info pcie_intr_info[] = {
1902 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1903 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1904 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1905 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1906 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1907 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1908 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1909 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1910 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1911 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1912 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1913 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1914 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1915 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1916 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1917 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1918 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1919 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1920 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1921 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1922 { F_FIDPERR, "PCI FID parity error", -1, 1 },
1923 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1924 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1925 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1926 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1927 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1928 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1929 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
1930 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
1931 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1938 fat = t4_handle_intr_status(adapter,
1939 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1941 t4_handle_intr_status(adapter,
1942 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1943 pcie_port_intr_info) +
1944 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1946 t4_fatal_err(adapter);
1950 * TP interrupt handler. Both reported conditions are fatal.
1952 static void tp_intr_handler(struct adapter *adapter)
1954 static struct intr_info tp_intr_info[] = {
1955 { 0x3fffffff, "TP parity error", -1, 1 },
1956 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1960 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1961 t4_fatal_err(adapter);
1965 * SGE interrupt handler. Parity errors (INT_CAUSE1/2) and the fatal
1966 * entries of INT_CAUSE3 trigger t4_fatal_err().
1967 static void sge_intr_handler(struct adapter *adapter)
1972 static struct intr_info sge_intr_info[] = {
1973 { F_ERR_CPL_EXCEED_IQE_SIZE,
1974 "SGE received CPL exceeding IQE size", -1, 1 },
1975 { F_ERR_INVALID_CIDX_INC,
1976 "SGE GTS CIDX increment too large", -1, 0 },
1977 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1978 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1979 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1980 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1981 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1983 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1985 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1987 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1989 { F_ERR_ING_CTXT_PRIO,
1990 "SGE too many priority ingress contexts", -1, 0 },
1991 { F_ERR_EGR_CTXT_PRIO,
1992 "SGE too many priority egress contexts", -1, 0 },
1993 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1994 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1998 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1999 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2001 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2002 (unsigned long long)v);
2003 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2004 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2007 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2009 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2010 if (err & F_ERROR_QID_VALID) {
2011 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2012 if (err & F_UNCAPTURED_ERROR)
2013 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2014 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2015 F_UNCAPTURED_ERROR);
2019 t4_fatal_err(adapter);
2022 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2023 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2024 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2025 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2028 * CIM interrupt handler. All listed conditions are fatal; a set
2029 * PCIE_FW error bit is additionally reported as a firmware error.
2030 static void cim_intr_handler(struct adapter *adapter)
2032 static struct intr_info cim_intr_info[] = {
2033 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2034 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2035 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2036 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2037 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2038 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2039 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2042 static struct intr_info cim_upintr_info[] = {
2043 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2044 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2045 { F_ILLWRINT, "CIM illegal write", -1, 1 },
2046 { F_ILLRDINT, "CIM illegal read", -1, 1 },
2047 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2048 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2049 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2050 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2051 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2052 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2053 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2054 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2055 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2056 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2057 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2058 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2059 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2060 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2061 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2062 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2063 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2064 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2065 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2066 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2067 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2068 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2069 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2070 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2075 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2076 t4_report_fw_error(adapter);
2078 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2080 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2083 t4_fatal_err(adapter);
2087 * ULP RX interrupt handler. All reported conditions are fatal.
2089 static void ulprx_intr_handler(struct adapter *adapter)
2091 static struct intr_info ulprx_intr_info[] = {
2092 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2093 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2094 { 0x7fffff, "ULPRX parity error", -1, 1 },
2098 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2099 t4_fatal_err(adapter);
2103 * ULP TX interrupt handler. All reported conditions are fatal.
2105 static void ulptx_intr_handler(struct adapter *adapter)
2107 static struct intr_info ulptx_intr_info[] = {
2108 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2110 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2112 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2114 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2116 { 0xfffffff, "ULPTX parity error", -1, 1 },
2120 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2121 t4_fatal_err(adapter);
2125 * PM TX interrupt handler. All reported conditions are fatal.
2127 static void pmtx_intr_handler(struct adapter *adapter)
2129 static struct intr_info pmtx_intr_info[] = {
2130 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2131 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2132 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2133 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2134 { 0xffffff0, "PMTX framing error", -1, 1 },
2135 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2136 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2138 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2139 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2143 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2144 t4_fatal_err(adapter);
2148 * PM RX interrupt handler. All reported conditions are fatal.
2150 static void pmrx_intr_handler(struct adapter *adapter)
2152 static struct intr_info pmrx_intr_info[] = {
2153 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2154 { 0x3ffff0, "PMRX framing error", -1, 1 },
2155 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2156 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2158 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2159 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2163 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2164 t4_fatal_err(adapter);
2168 * CPL switch interrupt handler. All reported conditions are fatal.
2170 static void cplsw_intr_handler(struct adapter *adapter)
2172 static struct intr_info cplsw_intr_info[] = {
2173 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2174 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2175 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2176 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2177 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2178 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2182 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2183 t4_fatal_err(adapter);
2187 * LE interrupt handler. LIP conditions are non-fatal; parity and
2188 * command/queue errors are fatal.
2189 static void le_intr_handler(struct adapter *adap)
2191 static struct intr_info le_intr_info[] = {
2192 { F_LIPMISS, "LE LIP miss", -1, 0 },
2193 { F_LIP0, "LE 0 LIP error", -1, 0 },
2194 { F_PARITYERR, "LE parity error", -1, 1 },
2195 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2196 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
2200 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2205 * MPS interrupt handler. All reported conditions are fatal.
2207 static void mps_intr_handler(struct adapter *adapter)
2209 static struct intr_info mps_rx_intr_info[] = {
2210 { 0xffffff, "MPS Rx parity error", -1, 1 },
2213 static struct intr_info mps_tx_intr_info[] = {
2214 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2215 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2216 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2218 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2220 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
2221 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2222 { F_FRMERR, "MPS Tx framing error", -1, 1 },
2225 static struct intr_info mps_trc_intr_info[] = {
2226 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2227 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2229 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2232 static struct intr_info mps_stat_sram_intr_info[] = {
2233 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2236 static struct intr_info mps_stat_tx_intr_info[] = {
2237 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2240 static struct intr_info mps_stat_rx_intr_info[] = {
2241 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2244 static struct intr_info mps_cls_intr_info[] = {
2245 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2246 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2247 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2253 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2255 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2257 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2258 mps_trc_intr_info) +
2259 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2260 mps_stat_sram_intr_info) +
2261 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2262 mps_stat_tx_intr_info) +
2263 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2264 mps_stat_rx_intr_info) +
2265 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2268 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2269 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2271 t4_fatal_err(adapter);
/* Cause bits common to the EDC and MC memory controllers. */
2274 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
/*
 * EDC/MC interrupt handler: @idx selects the memory controller (MEM_EDC0,
 * MEM_EDC1 or MC).  Reports FIFO parity errors and correctable/uncorrectable
 * ECC errors, clears the per-controller CE count and cause bits, and treats
 * parity or uncorrectable ECC as fatal.
 * NOTE(review): else-branch braces and the second CH_ALERT argument line are
 * missing from this extract — verify against the full file.
 */
2277 * EDC/MC interrupt handler.
2279 static void mem_intr_handler(struct adapter *adapter, int idx)
2281 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2283 unsigned int addr, cnt_addr, v;
/* Pick the cause/ECC-status register pair for this controller. */
2285 if (idx <= MEM_EDC1) {
2286 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2287 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2289 addr = A_MC_INT_CAUSE;
2290 cnt_addr = A_MC_ECC_STATUS;
2293 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2294 if (v & F_PERR_INT_CAUSE)
2295 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2296 if (v & F_ECC_CE_INT_CAUSE) {
2297 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
/* Writing the CE-count field clears the hardware counter. */
2299 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2300 CH_WARN_RATELIMIT(adapter,
2301 "%u %s correctable ECC data error%s\n",
2302 cnt, name[idx], cnt > 1 ? "s" : "");
2304 if (v & F_ECC_UE_INT_CAUSE)
2305 CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2308 t4_write_reg(adapter, addr, v);
2309 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2310 t4_fatal_err(adapter);
/*
 * MA interrupt handler: reports memory-arbiter parity errors and address
 * wrap-around errors, acknowledges the cause bits, and escalates fatally.
 * All MA interrupt causes are treated as fatal here.
 */
2314 * MA interrupt handler.
2316 static void ma_intr_handler(struct adapter *adapter)
2318 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2320 if (status & F_MEM_PERR_INT_CAUSE)
2321 CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2322 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2323 if (status & F_MEM_WRAP_INT_CAUSE) {
2324 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
/* Wrap address field is in units of 16 bytes, hence the << 4. */
2325 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2326 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2327 G_MEM_WRAP_ADDRESS(v) << 4);
2329 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2330 t4_fatal_err(adapter);
/*
 * SMB interrupt handler: decodes A_SMB_INT_CAUSE; all listed causes are
 * parity errors and are flagged fatal.
 * NOTE(review): table terminator and the t4_fatal_err() call after the
 * check are outside this extract — confirm in full source.
 */
2334 * SMB interrupt handler.
2336 static void smb_intr_handler(struct adapter *adap)
2338 static struct intr_info smb_intr_info[] = {
2339 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2340 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2341 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2345 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
/*
 * NC-SI interrupt handler: decodes A_NCSI_INT_CAUSE; all listed causes are
 * parity errors and are flagged fatal.
 * NOTE(review): table terminator and the fatal-error escalation after the
 * check are outside this extract — confirm in full source.
 */
2350 * NC-SI interrupt handler.
2352 static void ncsi_intr_handler(struct adapter *adap)
2354 static struct intr_info ncsi_intr_info[] = {
2355 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2356 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2357 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2358 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2362 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
/*
 * XGMAC interrupt handler for one port: reports Tx/Rx FIFO parity errors
 * and writes the handled bits back to the cause register to acknowledge.
 * NOTE(review): the early return for v == 0 and any fatal escalation are
 * not visible in this extract — verify against the complete file.
 */
2367 * XGMAC interrupt handler.
2369 static void xgmac_intr_handler(struct adapter *adap, int port)
2371 u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
/* Only the two FIFO parity causes are handled here. */
2373 v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2377 if (v & F_TXFIFO_PRTY_ERR)
2378 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2379 if (v & F_RXFIFO_PRTY_ERR)
2380 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2381 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
/*
 * PL interrupt handler: decodes A_PL_PL_INT_CAUSE; both listed causes are
 * fatal parity errors.
 * NOTE(review): table terminator and the fatal escalation after the check
 * are outside this extract — confirm in full source.
 */
2386 * PL interrupt handler.
2388 static void pl_intr_handler(struct adapter *adap)
2390 static struct intr_info pl_intr_info[] = {
2391 { F_FATALPERR, "T4 fatal parity error", -1, 1 },
2392 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2396 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2400 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2401 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2402 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2403 F_CPL_SWITCH | F_SGE | F_ULP_TX)
/*
 * Dispatches each asserted bit in A_PL_INT_CAUSE to the matching module
 * handler, then acknowledges the bits this function owns (GLBL_INTR_MASK)
 * and flushes the write with a read-back.
 * NOTE(review): the "if (cause & F_<MODULE>)" guard preceding several of
 * the handler calls below, the early "return 0", and the final return are
 * missing from this extract — verify against the complete file.
 */
2406 * t4_slow_intr_handler - control path interrupt handler
2407 * @adapter: the adapter
2409 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2410 * The designation 'slow' is because it involves register reads, while
2411 * data interrupts typically don't involve any MMIOs.
2413 int t4_slow_intr_handler(struct adapter *adapter)
2415 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
/* Nothing for us to do if no owned cause bit is set. */
2417 if (!(cause & GLBL_INTR_MASK))
2420 cim_intr_handler(adapter);
2422 mps_intr_handler(adapter);
2424 ncsi_intr_handler(adapter);
2426 pl_intr_handler(adapter);
2428 smb_intr_handler(adapter);
2429 if (cause & F_XGMAC0)
2430 xgmac_intr_handler(adapter, 0);
2431 if (cause & F_XGMAC1)
2432 xgmac_intr_handler(adapter, 1);
2433 if (cause & F_XGMAC_KR0)
2434 xgmac_intr_handler(adapter, 2);
2435 if (cause & F_XGMAC_KR1)
2436 xgmac_intr_handler(adapter, 3);
2438 pcie_intr_handler(adapter);
2440 mem_intr_handler(adapter, MEM_MC);
2442 mem_intr_handler(adapter, MEM_EDC0);
2444 mem_intr_handler(adapter, MEM_EDC1);
2446 le_intr_handler(adapter);
2448 tp_intr_handler(adapter);
2450 ma_intr_handler(adapter);
2451 if (cause & F_PM_TX)
2452 pmtx_intr_handler(adapter);
2453 if (cause & F_PM_RX)
2454 pmrx_intr_handler(adapter);
2455 if (cause & F_ULP_RX)
2456 ulprx_intr_handler(adapter);
2457 if (cause & F_CPL_SWITCH)
2458 cplsw_intr_handler(adapter);
2460 sge_intr_handler(adapter);
2461 if (cause & F_ULP_TX)
2462 ulptx_intr_handler(adapter);
2464 /* Clear the interrupts just processed for which we are the master. */
2465 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2466 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
/*
 * Enables the SGE error interrupts, this PF's private interrupt mask, and
 * registers this PF as an interrupt master in the global fan-out map.
 * NOTE(review): the final term of the A_SGE_INT_ENABLE3 bitmask (the line
 * closing the expression, likely F_EGRESS_SIZE_ERR) is missing from this
 * extract — verify against the complete file.
 */
2471 * t4_intr_enable - enable interrupts
2472 * @adapter: the adapter whose interrupts should be enabled
2474 * Enable PF-specific interrupts for the calling function and the top-level
2475 * interrupt concentrator for global interrupts. Interrupts are already
2476 * enabled at each module, here we just enable the roots of the interrupt
2479 * Note: this function should be called only when the driver manages
2480 * non PF-specific interrupts from the various HW modules. Only one PCI
2481 * function at a time should be doing this.
2483 void t4_intr_enable(struct adapter *adapter)
2485 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2487 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2488 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2489 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2490 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2491 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2492 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2493 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2495 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2496 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2500 * t4_intr_disable - disable interrupts
2501 * @adapter: the adapter whose interrupts should be disabled
2503 * Disable interrupts. We only disable the top-level interrupt
2504 * concentrators. The caller must be a PCI function managing global
2507 void t4_intr_disable(struct adapter *adapter)
2509 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2511 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2512 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
/*
 * Writes all-ones to every per-module interrupt-cause register listed in
 * cause_reg[] to clear latched causes, then clears the global PL cause
 * bits and flushes with a read-back.
 * NOTE(review): several cause_reg[] entries, the array terminator, and the
 * declaration of loop index 'i' are missing from this extract — verify
 * against the complete file.
 */
2516 * t4_intr_clear - clear all interrupts
2517 * @adapter: the adapter whose interrupts should be cleared
2519 * Clears all interrupts. The caller must be a PCI function managing
2520 * global interrupts.
2522 void t4_intr_clear(struct adapter *adapter)
2524 static const unsigned int cause_reg[] = {
2525 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2526 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2527 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2528 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2530 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2531 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2532 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2533 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2535 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2536 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2537 A_MPS_RX_PERR_INT_CAUSE,
2539 MYPF_REG(A_PL_PF_INT_CAUSE),
2546 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2547 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2549 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2550 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
/*
 * Packs the 6-byte MAC address into two 24-bit words, one per half.
 * NOTE(review): the mixing/fold steps and the return statement of the hash
 * are missing from this extract — verify against the complete file.
 */
2554 * hash_mac_addr - return the hash value of a MAC address
2555 * @addr: the 48-bit Ethernet MAC address
2557 * Hashes a MAC address according to the hash function used by HW inexact
2558 * (hash) address matching.
2560 static int hash_mac_addr(const u8 *addr)
2562 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2563 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
/*
 * Writes a range of RSS indirection-table entries for a VI by issuing one
 * or more FW_RSS_IND_TBL_CMD mailbox commands, packing Ingress Queue IDs
 * three per 32-bit word, up to 32 IDs per command.
 * NOTE(review): this extract has many gaps (loop headers, qbuf handling,
 * wrap-around of the rspq pointer, error return paths) — verify against
 * the complete file before modifying.
 */
2571 * t4_config_rss_range - configure a portion of the RSS mapping table
2572 * @adapter: the adapter
2573 * @mbox: mbox to use for the FW command
2574 * @viid: virtual interface whose RSS subtable is to be written
2575 * @start: start entry in the table to write
2576 * @n: how many table entries to write
2577 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2578 * @nrspq: number of values in @rspq
2580 * Programs the selected part of the VI's RSS mapping table with the
2581 * provided values. If @nrspq < @n the supplied values are used repeatedly
2582 * until the full table range is populated.
2584 * The caller must ensure the values in @rspq are in the range allowed for
2587 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2588 int start, int n, const u16 *rspq, unsigned int nrspq)
2591 const u16 *rsp = rspq;
2592 const u16 *rsp_end = rspq + nrspq;
2593 struct fw_rss_ind_tbl_cmd cmd;
2595 memset(&cmd, 0, sizeof(cmd));
2596 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2597 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2598 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2599 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2603 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2604 * Queue Identifiers. These Ingress Queue IDs are packed three to
2605 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2609 int nq = min(n, 32);
2611 __be32 *qp = &cmd.iq0_to_iq2;
2614 * Set up the firmware RSS command header to send the next
2615 * "nq" Ingress Queue IDs to the firmware.
2617 cmd.niqid = htons(nq);
2618 cmd.startidx = htons(start);
2621 * "nq" more done for the start of the next loop.
2627 * While there are still Ingress Queue IDs to stuff into the
2628 * current firmware RSS command, retrieve them from the
2629 * Ingress Queue ID array and insert them into the command.
2633 * Grab up to the next 3 Ingress Queue IDs (wrapping
2634 * around the Ingress Queue ID array if necessary) and
2635 * insert them into the firmware RSS command at the
2636 * current 3-tuple position within the commad.
2640 int nqbuf = min(3, nq);
2643 qbuf[0] = qbuf[1] = qbuf[2] = 0;
2644 while (nqbuf && nq_packed < 32) {
2651 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2652 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2653 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2657 * Send this portion of the RRS table update to the firmware;
2658 * bail out on any errors.
2660 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
/*
 * Builds and sends a FW_RSS_GLB_CONFIG_CMD mailbox command for either the
 * MANUAL or BASICVIRTUAL global RSS mode.
 * NOTE(review): the signature continuation (the @flags parameter line),
 * the else/-EINVAL branch for unsupported modes, and surrounding braces
 * are missing from this extract — verify against the complete file.
 */
2669 * t4_config_glbl_rss - configure the global RSS mode
2670 * @adapter: the adapter
2671 * @mbox: mbox to use for the FW command
2672 * @mode: global RSS mode
2673 * @flags: mode-specific flags
2675 * Sets the global RSS mode.
2677 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2680 struct fw_rss_glb_config_cmd c;
2682 memset(&c, 0, sizeof(c));
2683 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2684 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2685 c.retval_len16 = htonl(FW_LEN16(c));
2686 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2687 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2688 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2689 c.u.basicvirtual.mode_pkd =
2690 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2691 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2694 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2698 * t4_config_vi_rss - configure per VI RSS settings
2699 * @adapter: the adapter
2700 * @mbox: mbox to use for the FW command
2703 * @defq: id of the default RSS queue for the VI.
2705 * Configures VI-specific RSS properties.
2707 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2708 unsigned int flags, unsigned int defq)
2710 struct fw_rss_vi_config_cmd c;
2712 memset(&c, 0, sizeof(c));
2713 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2714 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2715 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2716 c.retval_len16 = htonl(FW_LEN16(c));
2717 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2718 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2719 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
/*
 * Selects a row of the RSS lookup table, then polls for the row-valid bit
 * and returns the register contents through *val.
 * NOTE(review): the trailing arguments of the t4_wait_op_done_val() call
 * (attempt count/delay/valp) are missing from this extract — verify
 * against the complete file.
 */
2722 /* Read an RSS table row */
2723 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2725 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2726 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
/*
 * Reads the whole RSS lookup table two entries per row via rd_rss_row(),
 * unpacking each row's two queue fields into consecutive @map slots.
 * NOTE(review): local declarations, the error check on 'ret', and the
 * final return are missing from this extract — verify in full source.
 */
2731 * t4_read_rss - read the contents of the RSS mapping table
2732 * @adapter: the adapter
2733 * @map: holds the contents of the RSS mapping table
2735 * Reads the contents of the RSS hash->queue mapping table.
2737 int t4_read_rss(struct adapter *adapter, u16 *map)
2742 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2743 ret = rd_rss_row(adapter, i, &val);
2746 *map++ = G_LKPTBLQUEUE0(val);
2747 *map++ = G_LKPTBLQUEUE1(val);
2753 * t4_read_rss_key - read the global RSS key
2754 * @adap: the adapter
2755 * @key: 10-entry array holding the 320-bit RSS key
2757 * Reads the global 320-bit RSS key.
2759 void t4_read_rss_key(struct adapter *adap, u32 *key)
2761 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2762 A_TP_RSS_SECRET_KEY0);
2766 * t4_write_rss_key - program one of the RSS keys
2767 * @adap: the adapter
2768 * @key: 10-entry array holding the 320-bit RSS key
2769 * @idx: which RSS key to write
2771 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2772 * 0..15 the corresponding entry in the RSS key table is written,
2773 * otherwise the global RSS key is written.
2775 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2777 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2778 A_TP_RSS_SECRET_KEY0);
2779 if (idx >= 0 && idx < 16)
2780 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2781 V_KEYWRADDR(idx) | F_KEYWREN);
2785 * t4_read_rss_pf_config - read PF RSS Configuration Table
2786 * @adapter: the adapter
2787 * @index: the entry in the PF RSS table to read
2788 * @valp: where to store the returned value
2790 * Reads the PF RSS Configuration Table at the specified index and returns
2791 * the value found there.
2793 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2795 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2796 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2800 * t4_write_rss_pf_config - write PF RSS Configuration Table
2801 * @adapter: the adapter
2802 * @index: the entry in the VF RSS table to read
2803 * @val: the value to store
2805 * Writes the PF RSS Configuration Table at the specified index with the
2808 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2810 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2811 &val, 1, A_TP_RSS_PF0_CONFIG + index);
/*
 * Triggers a VF-table read through A_TP_RSS_CONFIG_VRT, then fetches the
 * latched VFL/VFH values via the TP PIO window.
 * NOTE(review): the signature continuation (the *vfl/*vfh parameter line)
 * and braces are missing from this extract — verify in full source.
 */
2815 * t4_read_rss_vf_config - read VF RSS Configuration Table
2816 * @adapter: the adapter
2817 * @index: the entry in the VF RSS table to read
2818 * @vfl: where to store the returned VFL
2819 * @vfh: where to store the returned VFH
2821 * Reads the VF RSS Configuration Table at the specified index and returns
2822 * the (VFL, VFH) values found there.
2824 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2830 * Request that the index'th VF Table values be read into VFL/VFH.
2832 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
/* Clear write-side controls, select the index, enable the read. */
2833 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2834 vrt |= V_VFWRADDR(index) | F_VFRDEN;
2835 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2838 * Grab the VFL/VFH values ...
2840 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2841 vfl, 1, A_TP_RSS_VFL_CONFIG);
2842 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2843 vfh, 1, A_TP_RSS_VFH_CONFIG);
/*
 * Stages VFL/VFH through the TP PIO window, then commits them into the
 * index'th VF-table slot via A_TP_RSS_CONFIG_VRT.
 * NOTE(review): the signature continuation (the vfl/vfh parameter line)
 * and braces are missing from this extract — verify in full source.
 */
2847 * t4_write_rss_vf_config - write VF RSS Configuration Table
2849 * @adapter: the adapter
2850 * @index: the entry in the VF RSS table to write
2851 * @vfl: the VFL to store
2852 * @vfh: the VFH to store
2854 * Writes the VF RSS Configuration Table at the specified index with the
2855 * specified (VFL, VFH) values.
2857 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2863 * Load up VFL/VFH with the values to be written ...
2865 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2866 &vfl, 1, A_TP_RSS_VFL_CONFIG);
2867 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2868 &vfh, 1, A_TP_RSS_VFH_CONFIG);
2871 * Write the VFL/VFH into the VF Table at index'th location.
2873 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
/* Clear read-side controls, select the index, enable the write. */
2874 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2875 vrt |= V_VFWRADDR(index) | F_VFWREN;
2876 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
/*
 * Reads the PF RSS Map register through the TP PIO window.
 * NOTE(review): the local 'pfmap' declaration and the return statement are
 * missing from this extract — verify against the complete file.
 */
2880 * t4_read_rss_pf_map - read PF RSS Map
2881 * @adapter: the adapter
2883 * Reads the PF RSS Map register and returns its value.
2885 u32 t4_read_rss_pf_map(struct adapter *adapter)
2889 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2890 &pfmap, 1, A_TP_RSS_PF_MAP);
2895 * t4_write_rss_pf_map - write PF RSS Map
2896 * @adapter: the adapter
2897 * @pfmap: PF RSS Map value
2899 * Writes the specified value to the PF RSS Map register.
2901 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2903 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2904 &pfmap, 1, A_TP_RSS_PF_MAP);
/*
 * Reads the PF RSS Mask register through the TP PIO window.
 * NOTE(review): the local 'pfmask' declaration and the return statement
 * are missing from this extract — verify against the complete file.
 */
2908 * t4_read_rss_pf_mask - read PF RSS Mask
2909 * @adapter: the adapter
2911 * Reads the PF RSS Mask register and returns its value.
2913 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2917 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2918 &pfmask, 1, A_TP_RSS_PF_MSK);
2923 * t4_write_rss_pf_mask - write PF RSS Mask
2924 * @adapter: the adapter
2925 * @pfmask: PF RSS Mask value
2927 * Writes the specified value to the PF RSS Mask register.
2929 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2931 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2932 &pfmask, 1, A_TP_RSS_PF_MSK);
/*
 * Validates that the total bit width of the selected optional filter
 * components fits in FILTER_OPT_LEN, then programs the mode word via the
 * TP PIO window.  width[] holds the per-component field widths, indexed
 * from S_FCOE to S_FRAGMENTATION.
 * NOTE(review): the 'nbits' accumulator declaration/update, the error
 * return, and the target TP register of the final write are missing from
 * this extract — verify against the complete file.
 */
2936 * t4_set_filter_mode - configure the optional components of filter tuples
2937 * @adap: the adapter
2938 * @mode_map: a bitmap selcting which optional filter components to enable
2940 * Sets the filter mode by selecting the optional components to enable
2941 * in filter tuples. Returns 0 on success and a negative error if the
2942 * requested mode needs more bits than are available for optional
2945 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2947 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2951 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2952 if (mode_map & (1 << i))
2954 if (nbits > FILTER_OPT_LEN)
2956 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
/*
 * Bulk-reads the TCP MIB counter block once for IPv4 and once for IPv6,
 * then decodes individual counters via the STAT/STAT64 helper macros.
 * NOTE(review): the "if (v4) { ... }" / "if (v6) { ... }" guards implied by
 * the NULL-skipping contract, and the #undef lines for the macros, are
 * missing from this extract — verify against the complete file.
 */
2962 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2963 * @adap: the adapter
2964 * @v4: holds the TCP/IP counter values
2965 * @v6: holds the TCP/IPv6 counter values
2967 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2968 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2970 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2971 struct tp_tcp_stats *v6)
2973 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2975 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2976 #define STAT(x) val[STAT_IDX(x)]
2977 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2980 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2981 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST)
2982 v4->tcpOutRsts = STAT(OUT_RST);
2983 v4->tcpInSegs = STAT64(IN_SEG);
2984 v4->tcpOutSegs = STAT64(OUT_SEG);
2985 v4->tcpRetransSegs = STAT64(RXT_SEG);
2988 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2989 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2990 v6->tcpOutRsts = STAT(OUT_RST);
2991 v6->tcpInSegs = STAT64(IN_SEG);
2992 v6->tcpOutSegs = STAT64(OUT_SEG);
2993 v6->tcpRetransSegs = STAT64(RXT_SEG);
3001 * t4_tp_get_err_stats - read TP's error MIB counters
3002 * @adap: the adapter
3003 * @st: holds the counter values
3005 * Returns the values of TP's error counters.
3007 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3009 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3010 12, A_TP_MIB_MAC_IN_ERR_0);
3011 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3012 8, A_TP_MIB_TNL_CNG_DROP_0);
3013 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3014 4, A_TP_MIB_TNL_DROP_0);
3015 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3016 4, A_TP_MIB_OFD_VLN_DROP_0);
3017 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3018 4, A_TP_MIB_TCP_V6IN_ERR_0);
3019 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3020 2, A_TP_MIB_OFD_ARP_DROP);
3024 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
3025 * @adap: the adapter
3026 * @st: holds the counter values
3028 * Returns the values of TP's proxy counters.
3030 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3032 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3033 4, A_TP_MIB_TNL_LPBK_0);
3037 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
3038 * @adap: the adapter
3039 * @st: holds the counter values
3041 * Returns the values of TP's CPL counters.
3043 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3045 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3046 8, A_TP_MIB_CPL_IN_REQ_0);
3050 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3051 * @adap: the adapter
3052 * @st: holds the counter values
3054 * Returns the values of TP's RDMA counters.
3056 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3058 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3059 2, A_TP_MIB_RQE_DFR_MOD);
/*
 * Reads the per-port FCoE DDP/drop counters and assembles the 64-bit byte
 * counter from its HI/LO register pair.
 * NOTE(review): the declaration of the local val[2] buffer is missing from
 * this extract — verify against the complete file.
 */
3063 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3064 * @adap: the adapter
3065 * @idx: the port index
3066 * @st: holds the counter values
3068 * Returns the values of TP's FCoE counters for the selected port.
3070 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3071 struct tp_fcoe_stats *st)
3075 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3076 1, A_TP_MIB_FCOE_DDP_0 + idx);
3077 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3078 1, A_TP_MIB_FCOE_DROP_0 + idx);
/* HI/LO pair: 2 registers per port, hence the 2 * idx stride. */
3079 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3080 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3081 st->octetsDDP = ((u64)val[0] << 32) | val[1];
/*
 * Bulk-reads four USM MIB registers and decodes the frame count, drop
 * count, and the 64-bit octet counter from the HI/LO pair.
 * NOTE(review): the local val[] declaration, the start-register argument
 * of the read, and the st->drops assignment are missing from this extract
 * — verify against the complete file.
 */
3085 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3086 * @adap: the adapter
3087 * @st: holds the counter values
3089 * Returns the values of TP's counters for non-TCP directly-placed packets.
3091 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3095 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3097 st->frames = val[0];
3099 st->octets = ((u64)val[2] << 32) | val[3];
/*
 * Iterates the HW path-MTU table: writing V_MTUINDEX(0xff) auto-selects,
 * and the read-back contains both the MTU value and its width/log field.
 * NOTE(review): the local declarations, the NULL guard before the
 * mtu_log[] store, and the closing braces are missing from this extract —
 * verify against the complete file.
 */
3103 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3104 * @adap: the adapter
3105 * @mtus: where to store the MTU values
3106 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3108 * Reads the HW path MTU table.
3110 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3115 for (i = 0; i < NMTUS; ++i) {
3116 t4_write_reg(adap, A_TP_MTU_TABLE,
3117 V_MTUINDEX(0xff) | V_MTUVALUE(i));
3118 v = t4_read_reg(adap, A_TP_MTU_TABLE);
3119 mtus[i] = G_MTUVALUE(v);
3121 mtu_log[i] = G_MTUWIDTH(v);
3126 * t4_read_cong_tbl - reads the congestion control table
3127 * @adap: the adapter
3128 * @incr: where to store the alpha values
3130 * Reads the additive increments programmed into the HW congestion
3133 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3135 unsigned int mtu, w;
3137 for (mtu = 0; mtu < NMTUS; ++mtu)
3138 for (w = 0; w < NCCTRL_WIN; ++w) {
3139 t4_write_reg(adap, A_TP_CCTRL_TABLE,
3140 V_ROWINDEX(0xffff) | (mtu << 5) | w);
3141 incr[mtu][w] = (u16)t4_read_reg(adap,
3142 A_TP_CCTRL_TABLE) & 0x1fff;
3147 * t4_read_pace_tbl - read the pace table
3148 * @adap: the adapter
3149 * @pace_vals: holds the returned values
3151 * Returns the values of TP's pace table in microseconds.
3153 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3157 for (i = 0; i < NTX_SCHED; i++) {
3158 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3159 v = t4_read_reg(adap, A_TP_PACE_TABLE);
3160 pace_vals[i] = dack_ticks_to_usec(adap, v);
3165 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3166 * @adap: the adapter
3167 * @addr: the indirect TP register address
3168 * @mask: specifies the field within the register to modify
3169 * @val: new value for the field
3171 * Sets a field of an indirect TP register to the given value.
3173 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3174 unsigned int mask, unsigned int val)
3176 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3177 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3178 t4_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * Fills the congestion-control alpha (@a) and beta (@b) parameter arrays
 * with their default profile.
 * NOTE(review): this extract is missing most of the table — the a[9..]
 * assignments and several b[] runs — verify against the complete file.
 */
3182 * init_cong_ctrl - initialize congestion control parameters
3183 * @a: the alpha values for congestion control
3184 * @b: the beta values for congestion control
3186 * Initialize the congestion control parameters.
3188 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3190 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3215 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3218 b[13] = b[14] = b[15] = b[16] = 3;
3219 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3220 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3225 /* The minimum additive increment value for the congestion control table */
3226 #define CC_MIN_INCR 2U
/*
 * Programs the HW MTU table entry-by-entry and, for each MTU, the per-
 * window congestion-control increments derived from alpha/beta and the
 * avg_pkts[] profile.
 * NOTE(review): local declarations, the log2 rounding adjustment, the
 * 'inc' declaration/clamping against CC_MIN_INCR, and closing braces are
 * missing from this extract — verify against the complete file.
 */
3229 * t4_load_mtus - write the MTU and congestion control HW tables
3230 * @adap: the adapter
3231 * @mtus: the values for the MTU table
3232 * @alpha: the values for the congestion control alpha parameter
3233 * @beta: the values for the congestion control beta parameter
3235 * Write the HW MTU table with the supplied MTUs and the high-speed
3236 * congestion control table with the supplied alpha, beta, and MTUs.
3237 * We write the two tables together because the additive increments
3238 * depend on the MTUs.
3240 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3241 const unsigned short *alpha, const unsigned short *beta)
3243 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3244 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3245 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3246 28672, 40960, 57344, 81920, 114688, 163840, 229376
3251 for (i = 0; i < NMTUS; ++i) {
3252 unsigned int mtu = mtus[i];
3253 unsigned int log2 = fls(mtu);
3255 if (!(mtu & ((1 << log2) >> 2))) /* round */
3257 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3258 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3260 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment scaled by alpha and the window's average packet count. */
3263 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3266 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3267 (w << 16) | (beta[w] << 13) | inc);
/*
 * Converts microsecond pace values to delayed-ACK ticks (rounding to the
 * nearest tick) and writes them into the HW pace table at @start.
 * NOTE(review): the bounds check on start/n, the -ERANGE returns for
 * out-of-range or underflowed values, and the final return are missing
 * from this extract — verify against the complete file.
 */
3273 * t4_set_pace_tbl - set the pace table
3274 * @adap: the adapter
3275 * @pace_vals: the pace values in microseconds
3276 * @start: index of the first entry in the HW pace table to set
3277 * @n: how many entries to set
3279 * Sets (a subset of the) HW pace table.
3281 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3282 unsigned int start, unsigned int n)
3284 unsigned int vals[NTX_SCHED], i;
3285 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3290 /* convert values from us to dack ticks, rounding to closest value */
3291 for (i = 0; i < n; i++, pace_vals++) {
3292 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3293 if (vals[i] > 0x7ff)
/* A nonzero input must not round down to zero ticks. */
3295 if (*pace_vals && vals[i] == 0)
3298 for (i = 0; i < n; i++, start++)
3299 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
/*
 * Searches (cpt, bpt) pairs — clocks-per-tick and bytes-per-tick — for the
 * combination whose effective rate is closest to the requested Kbps, then
 * programs the selected pair into the scheduler's half of the rate-limit
 * register (upper 16 bits for odd schedulers, lower for even).
 * NOTE(review): several lines are missing here (tps computation, the
 * mindelta/selected_* updates, the "if (sched & 1)" split, and returns) —
 * verify against the complete file.
 */
3304 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3305 * @adap: the adapter
3306 * @kbps: target rate in Kbps
3307 * @sched: the scheduler index
3309 * Configure a Tx HW scheduler for the target rate.
3311 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3313 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3314 unsigned int clk = adap->params.vpd.cclk * 1000;
3315 unsigned int selected_cpt = 0, selected_bpt = 0;
3318 kbps *= 125; /* -> bytes */
3319 for (cpt = 1; cpt <= 255; cpt++) {
3321 bpt = (kbps + tps / 2) / tps;
3322 if (bpt > 0 && bpt <= 255) {
3324 delta = v >= kbps ? v - kbps : kbps - v;
3325 if (delta < mindelta) {
3330 } else if (selected_cpt)
/* Two schedulers share each rate-limit register. */
3336 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3337 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3338 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3340 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3342 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3343 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * Converts the requested inter-packet gap (tenths of ns) to core clocks,
 * range-checks it, and writes it into this scheduler's half of the shared
 * timer-separator register.
 * NOTE(review): the -EINVAL return, the "if (sched & 1)"/"else" selectors
 * between the two field updates, and the final return are missing from
 * this extract — verify against the complete file.
 */
3348 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3349 * @adap: the adapter
3350 * @sched: the scheduler index
3351 * @ipg: the interpacket delay in tenths of nanoseconds
3353 * Set the interpacket delay for a HW packet rate scheduler.
3355 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3357 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3359 /* convert ipg to nearest number of core clocks */
3360 ipg *= core_ticks_per_usec(adap);
3361 ipg = (ipg + 5000) / 10000;
3362 if (ipg > M_TXTIMERSEPQ0)
3365 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3366 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3368 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3370 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3371 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3372 t4_read_reg(adap, A_TP_TM_PIO_DATA);
/*
 * Reads back a scheduler's rate-limit register to recover (cpt, bpt) and
 * derive the byte rate in Kbps, then reads its timer-separator register to
 * recover the inter-packet gap in tenths of nanoseconds.
 * NOTE(review): the odd/even scheduler field selection (shifts for
 * "sched & 1"), NULL guards on @kbps/@ipg, and closing braces are missing
 * from this extract — verify against the complete file.
 */
3377 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3378 * @adap: the adapter
3379 * @sched: the scheduler index
3380 * @kbps: the byte rate in Kbps
3381 * @ipg: the interpacket delay in tenths of nanoseconds
3383 * Return the current configuration of a HW Tx scheduler.
3385 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3388 unsigned int v, addr, bpt, cpt;
3391 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3392 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3393 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3396 bpt = (v >> 8) & 0xff;
3399 *kbps = 0; /* scheduler disabled */
3401 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3402 *kbps = (v * bpt) / 125;
3406 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3407 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3408 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3412 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3417 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3418 * clocks. The formula is
3420 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3422 * which is equivalent to
3424 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3426 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
/* vpd.cclk is the core clock; v * 62 + v / 2 is integer-exact x62.5. */
3428 u64 v = bytes256 * adap->params.vpd.cclk;
3430 return v * 62 + v / 2;
3434 * t4_get_chan_txrate - get the current per channel Tx rates
3435 * @adap: the adapter
3436 * @nic_rate: rates for NIC traffic
3437 * @ofld_rate: rates for offloaded traffic
3439 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3442 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* Each TRATE/ORATE register packs four per-channel fields; the G_*RATEn
 * macros extract one channel's 256-byte-units count which chan_rate()
 * converts to bytes/s. Caller must supply 4-entry arrays. */
3446 v = t4_read_reg(adap, A_TP_TX_TRATE);
3447 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3448 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3449 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3450 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3452 v = t4_read_reg(adap, A_TP_TX_ORATE);
3453 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3454 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3455 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3456 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3460 * t4_set_trace_filter - configure one of the tracing filters
3461 * @adap: the adapter
3462 * @tp: the desired trace filter parameters
3463 * @idx: which filter to configure
3464 * @enable: whether to enable or disable the filter
3466 * Configures one of the tracing filters available in HW. If @enable is
3467 * %0 @tp is not examined and may be %NULL. The user is responsible to
3468 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
3469 * by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
3470 * docs/readme.txt for a complete description of how to setup traceing on
3473 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3476 int i, ofst = idx * 4;
3477 u32 data_reg, mask_reg, cfg;
3478 u32 multitrc = F_TRCMULTIFILTER;
/* NOTE(review): the disable path's "if (!enable) { ... return 0; }" framing
 * around the write below appears truncated in this capture (gaps 3479-3485). */
3481 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3486 * TODO - After T4 data book is updated, specify the exact
3489 * See T4 data book - MPS section for a complete description
3490 * of the below if..else handling of A_MPS_TRC_CFG register
3493 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3494 if (cfg & F_TRCMULTIFILTER) {
3496 * If multiple tracers are enabled, then maximum
3497 * capture size is 2.5KB (FIFO size of a single channel)
3498 * minus 2 flits for CPL_TRACE_PKT header.
/* 10 * 1024 / 4 = 2560 bytes (2.5KB), 2 * 8 = 16 bytes (two 8-byte flits). */
3500 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
/* NOTE(review): the "return -EINVAL;" bodies of the range checks here and
 * below were dropped in the capture (gaps at 3501, 3511, 3516). */
3505 * If multiple tracers are disabled, to avoid deadlocks
3506 * maximum packet capture size of 9600 bytes is recommended.
3507 * Also in this mode, only trace0 can be enabled and running.
3510 if (tp->snap_len > 9600 || idx)
/* Validate the remaining user-supplied filter parameters against the
 * hardware field widths. */
3514 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3515 tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3518 /* stop the tracer we'll be changing */
3519 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
/* Each filter has its own MATCH/DONT_CARE register bank; compute its base. */
3521 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3522 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3523 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
/* Hardware stores "don't care" bits, so the software mask is inverted. */
3525 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3526 t4_write_reg(adap, data_reg, tp->data[i]);
3527 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
/* Program capture limits, then enable the filter last so it only starts
 * matching once fully configured. */
3529 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3530 V_TFCAPTUREMAX(tp->snap_len) |
3531 V_TFMINPKTSIZE(tp->min_len));
3532 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3533 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3534 V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3540 * t4_get_trace_filter - query one of the tracing filters
3541 * @adap: the adapter
3542 * @tp: the current trace filter parameters
3543 * @idx: which trace filter to query
3544 * @enabled: non-zero if the filter is enabled
3546 * Returns the current settings of one of the HW tracing filters.
3548 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
/* NOTE(review): the "int *enabled" continuation line, opening brace and the
 * "u32 ctla, ctlb;" declaration were dropped in this capture (gaps
 * 3549-3551, 3554). */
3552 int i, ofst = idx * 4;
3553 u32 data_reg, mask_reg;
/* Read back both control words for filter @idx and decode each field —
 * the exact inverse of what t4_set_trace_filter() programs. */
3555 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3556 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3558 *enabled = !!(ctla & F_TFEN);
3559 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3560 tp->min_len = G_TFMINPKTSIZE(ctlb);
3561 tp->skip_ofst = G_TFOFFSET(ctla);
3562 tp->skip_len = G_TFLENGTH(ctla);
3563 tp->invert = !!(ctla & F_TFINVERTMATCH);
3564 tp->port = G_TFPORT(ctla);
3566 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3567 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3568 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
/* Hardware holds "don't care" bits; invert to recover the software mask,
 * and only report data bits that are actually matched. */
3570 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3571 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3572 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3577 * t4_pmtx_get_stats - returns the HW stats from PMTX
3578 * @adap: the adapter
3579 * @cnt: where to store the count statistics
3580 * @cycles: where to store the cycle statistics
3582 * Returns performance statistics from PMTX.
3584 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Select each statistic (1-based index) via STAT_CONFIG, then read its
 * event count and 64-bit cycle counter. Arrays hold PM_NSTATS entries. */
3588 for (i = 0; i < PM_NSTATS; i++) {
3589 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3590 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3591 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3596 * t4_pmrx_get_stats - returns the HW stats from PMRX
3597 * @adap: the adapter
3598 * @cnt: where to store the count statistics
3599 * @cycles: where to store the cycle statistics
3601 * Returns performance statistics from PMRX.
3603 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Mirror of t4_pmtx_get_stats() for the Rx path: select stat i+1 via
 * STAT_CONFIG, then read its count and 64-bit cycle counter. */
3607 for (i = 0; i < PM_NSTATS; i++) {
3608 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3609 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3610 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3615 * get_mps_bg_map - return the buffer groups associated with a port
3616 * @adap: the adapter
3617 * @idx: the port index
3619 * Returns a bitmap indicating which MPS buffer groups are associated
3620 * with the given port. Bit i is set if buffer group i is used by the
3623 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
/* The 4 buffer groups are partitioned among however many ports are
 * configured (NUMPORTS): 1 port -> all 4 groups, 2 ports -> 2 each,
 * 4 ports -> 1 each. */
3625 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
/* NOTE(review): the "if (n == 0)" / "if (n == 1)" selectors for the two
 * returns below, and the final 4-port "return 1 << idx;" case, were dropped
 * from this capture (gaps 3626-3627, 3629, 3631+). */
3628 return idx == 0 ? 0xf : 0;
3630 return idx < 2 ? (3 << (2 * idx)) : 0;
3635 * t4_get_port_stats_offset - collect port stats relative to a previous
3637 * @adap: The adapter
3639 * @stats: Current stats to fill
3640 * @offset: Previous stats snapshot
3642 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3643 struct port_stats *stats,
3644 struct port_stats *offset)
/* Read absolute HW counters, then subtract the saved snapshot field by
 * field, treating port_stats as a flat array of u64 counters. */
3649 t4_get_port_stats(adap, idx, stats);
/* NOTE(review): the loop body ("*s -= *o;" with the s++/o++ increment) was
 * dropped from this capture (gap 3652-3655); confirm against pristine source. */
3650 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3651 i < (sizeof(struct port_stats)/sizeof(u64)) ;
3657 * t4_get_port_stats - collect port statistics
3658 * @adap: the adapter
3659 * @idx: the port index
3660 * @p: the stats structure to fill
3662 * Collect statistics related to the given port from HW.
3664 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3666 u32 bgmap = get_mps_bg_map(adap, idx);
/* GET_STAT reads a per-port 64-bit MPS counter (low register of the L/H
 * pair); GET_STAT_COM reads a common (non-per-port) MPS counter. */
3668 #define GET_STAT(name) \
3669 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3670 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3672 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3673 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3674 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3675 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3676 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3677 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3678 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3679 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3680 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3681 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3682 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3683 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3684 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3685 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3686 p->tx_drop = GET_STAT(TX_PORT_DROP);
3687 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3688 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3689 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3690 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3691 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3692 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3693 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3694 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3696 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3697 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3698 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3699 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3700 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3701 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3702 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3703 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3704 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3705 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3706 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3707 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3708 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3709 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3710 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3711 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3712 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3713 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3714 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3715 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3716 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3717 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3718 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3719 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3720 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3721 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3722 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Buffer-group counters are only meaningful for groups this port owns
 * (per get_mps_bg_map()); report 0 for the others. */
3724 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3725 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3726 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3727 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3728 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3729 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3730 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
/* NOTE(review): the trailing "#undef GET_STAT"/"#undef GET_STAT_COM" and
 * closing brace were dropped from this capture (gap after 3731). */
3731 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3738 * t4_clr_port_stats - clear port statistics
3739 * @adap: the adapter
3740 * @idx: the port index
3742 * Clear HW statistics for the given port.
3744 void t4_clr_port_stats(struct adapter *adap, int idx)
3747 u32 bgmap = get_mps_bg_map(adap, idx);
/* Counters are 64-bit L/H register pairs, hence the stride of 8 bytes.
 * Zero the whole per-port Tx then Rx statistic ranges. */
3749 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3750 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3751 t4_write_reg(adap, PORT_REG(idx, i), 0);
3752 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3753 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3754 t4_write_reg(adap, PORT_REG(idx, i), 0);
/* Also clear the drop/truncate counters of each buffer group owned by
 * this port. NOTE(review): the "t4_write_reg(adap," header of each of the
 * two writes below was dropped in this capture (gaps at 3757, 3759). */
3755 for (i = 0; i < 4; i++)
3756 if (bgmap & (1 << i)) {
3758 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3760 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3765 * t4_get_lb_stats - collect loopback port statistics
3766 * @adap: the adapter
3767 * @idx: the loopback port index
3768 * @p: the stats structure to fill
3770 * Return HW statistics for the given loopback port.
3772 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3774 u32 bgmap = get_mps_bg_map(adap, idx);
/* Same L/H 64-bit counter convention as t4_get_port_stats(), but against
 * the LB_PORT statistic bank. */
3776 #define GET_STAT(name) \
3777 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3778 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3780 p->octets = GET_STAT(BYTES);
3781 p->frames = GET_STAT(FRAMES);
3782 p->bcast_frames = GET_STAT(BCAST);
3783 p->mcast_frames = GET_STAT(MCAST);
3784 p->ucast_frames = GET_STAT(UCAST);
3785 p->error_frames = GET_STAT(ERROR);
3787 p->frames_64 = GET_STAT(64B);
3788 p->frames_65_127 = GET_STAT(65B_127B);
3789 p->frames_128_255 = GET_STAT(128B_255B);
3790 p->frames_256_511 = GET_STAT(256B_511B);
3791 p->frames_512_1023 = GET_STAT(512B_1023B);
3792 p->frames_1024_1518 = GET_STAT(1024B_1518B);
3793 p->frames_1519_max = GET_STAT(1519B_MAX);
/* Drop counter is a plain 32-bit register, not an L/H pair. */
3794 p->drop = t4_read_reg(adap, PORT_REG(idx,
3795 A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
/* Per-buffer-group loopback drop/trunc counters, gated by ownership. */
3797 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3798 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3799 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3800 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3801 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3802 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3803 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3804 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3811 * t4_wol_magic_enable - enable/disable magic packet WoL
3812 * @adap: the adapter
3813 * @port: the physical port index
3814 * @addr: MAC address expected in magic packets, %NULL to disable
3816 * Enables/disables magic packet wake-on-LAN for the selected port.
3818 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
/* NOTE(review): the "const u8 *addr)" continuation, opening brace and the
 * "if (addr)" guard around the MACID programming were dropped from this
 * capture (gaps 3819-3821). */
/* Program the expected MAC: bytes 2-5 into the LO register, 0-1 into HI. */
3822 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3823 (addr[2] << 24) | (addr[3] << 16) |
3824 (addr[4] << 8) | addr[5]);
3825 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3826 (addr[0] << 8) | addr[1]);
/* MAGICEN tracks whether a MAC address was supplied: set to enable,
 * clear to disable. */
3828 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3829 V_MAGICEN(addr != NULL));
3833 * t4_wol_pat_enable - enable/disable pattern-based WoL
3834 * @adap: the adapter
3835 * @port: the physical port index
3836 * @map: bitmap of which HW pattern filters to set
3837 * @mask0: byte mask for bytes 0-63 of a packet
3838 * @mask1: byte mask for bytes 64-127 of a packet
3839 * @crc: Ethernet CRC for selected bytes
3840 * @enable: enable/disable switch
3842 * Sets the pattern filters indicated in @map to mask out the bytes
3843 * specified in @mask0/@mask1 in received packets and compare the CRC of
3844 * the resulting packet against @crc. If @enable is %true pattern-based
3845 * WoL is enabled, otherwise disabled.
3847 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3848 u64 mask0, u64 mask1, unsigned int crc, bool enable)
/* NOTE(review): the disable path ("if (!enable) { ... return 0; }") framing
 * around the clear below and a NWOL_PAT range check on @map appear truncated
 * in this capture (gaps 3849-3852, 3854-3859). */
3853 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3860 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
/* Stage the upper byte-mask words once; DATA0 is rewritten per pattern. */
3862 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3863 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3864 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
/* NOTE(review): the "if (!(map & 1)) continue;" skip for unselected
 * patterns and the -ETIMEDOUT returns after the two F_BUSY checks were
 * dropped in this capture (gaps 3867-3869, 3875-3877, 3882-3885). */
3866 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3870 /* write byte masks */
3871 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3872 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3873 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3874 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
/* CRC slots live 32 entries above the mask slots in the EPIO space. */
3878 t4_write_reg(adap, EPIO_REG(DATA0), crc);
3879 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3880 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3881 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
/* All patterns programmed; turn pattern-match WoL on. */
3886 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3891 * t4_mk_filtdelwr - create a delete filter WR
3892 * @ftid: the filter ID
3893 * @wr: the filter work request to populate
3894 * @qid: ingress queue to receive the delete notification
3896 * Creates a filter work request to delete the supplied filter. If @qid is
3897 * negative the delete notification is suppressed.
3899 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
/* Build the WR from a zeroed template; all multi-byte fields are
 * big-endian as required by the firmware interface. */
3901 memset(wr, 0, sizeof(*wr));
3902 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3903 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
/* NOREPLY is set when @qid < 0, suppressing the delete notification. */
3904 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3905 V_FW_FILTER_WR_NOREPLY(qid < 0));
3906 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
/* NOTE(review): an "if (qid >= 0)" guard for the reply-queue assignment
 * below was dropped from this capture (gap at 3907). */
3908 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * INIT_CMD - initialize the common header of a firmware command struct:
 * opcode FW_<cmd>_CMD, REQUEST flag, READ or WRITE direction, and the
 * command length in 16-byte units. @var is the command struct (by value).
 */
3911 #define INIT_CMD(var, cmd, rd_wr) do { \
3912 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3913 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3914 (var).retval_len16 = htonl(FW_LEN16(var)); \
/*
 * t4_fwaddrspace_write - write a value into the firmware's address space
 * via a FW_LDST command on mailbox @mbox. Returns the mailbox status
 * (0 on success, negative errno on failure).
 */
3917 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
3919 struct fw_ldst_cmd c;
3921 memset(&c, 0, sizeof(c));
3922 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3923 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
3924 c.cycles_to_len16 = htonl(FW_LEN16(c));
3925 c.u.addrval.addr = htonl(addr);
3926 c.u.addrval.val = htonl(val);
/* No reply payload is needed, hence the NULL response buffer. */
3928 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3932 * t4_i2c_rd - read a byte from an i2c addressable device
3933 * @adap: the adapter
3934 * @mbox: mailbox to use for the FW command
3935 * @port_id: the port id
3936 * @dev_addr: the i2c device address
3937 * @offset: the byte offset to read from
3938 * @valp: where to store the value
3940 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
3941 u8 dev_addr, u8 offset, u8 *valp)
3944 struct fw_ldst_cmd c;
3946 memset(&c, 0, sizeof(c));
3947 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3949 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
3950 c.cycles_to_len16 = htonl(FW_LEN16(c));
/* These are single-byte fields so no endianness conversion is applied. */
3951 c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
3952 c.u.i2c_deprecated.base = dev_addr;
3953 c.u.i2c_deprecated.boffset = offset;
/* Issue the command and read the reply back into the same struct.
 * NOTE(review): the "if (ret == 0)" guard around the store below and the
 * final "return ret;" were dropped from this capture (gaps 3956, 3958+). */
3955 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3957 *valp = c.u.i2c_deprecated.data;
3962 * t4_mdio_rd - read a PHY register through MDIO
3963 * @adap: the adapter
3964 * @mbox: mailbox to use for the FW command
3965 * @phy_addr: the PHY address
3966 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3967 * @reg: the register to read
3968 * @valp: where to store the value
3970 * Issues a FW command through the given mailbox to read a PHY register.
3972 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3973 unsigned int mmd, unsigned int reg, unsigned int *valp)
3976 struct fw_ldst_cmd c;
3978 memset(&c, 0, sizeof(c));
3979 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3980 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3981 c.cycles_to_len16 = htonl(FW_LEN16(c));
3982 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3983 V_FW_LDST_CMD_MMD(mmd));
3984 c.u.mdio.raddr = htons(reg);
/* Reply reuses the command struct; rval is big-endian on the wire.
 * NOTE(review): the "if (ret == 0)" guard before the store below and the
 * final "return ret;" were dropped from this capture (gaps 3987, 3989+). */
3986 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3988 *valp = ntohs(c.u.mdio.rval);
3993 * t4_mdio_wr - write a PHY register through MDIO
3994 * @adap: the adapter
3995 * @mbox: mailbox to use for the FW command
3996 * @phy_addr: the PHY address
3997 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3998 * @reg: the register to write
3999 * @valp: value to write
4001 * Issues a FW command through the given mailbox to write a PHY register.
4003 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4004 unsigned int mmd, unsigned int reg, unsigned int val)
4006 struct fw_ldst_cmd c;
/* Same LDST/MDIO framing as t4_mdio_rd(), but direction WRITE and the
 * value to write carried in rval. */
4008 memset(&c, 0, sizeof(c));
4009 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4010 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4011 c.cycles_to_len16 = htonl(FW_LEN16(c));
4012 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4013 V_FW_LDST_CMD_MMD(mmd));
4014 c.u.mdio.raddr = htons(reg);
4015 c.u.mdio.rval = htons(val);
4017 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4021 * t4_sge_ctxt_flush - flush the SGE context cache
4022 * @adap: the adapter
4023 * @mbox: mailbox to use for the FW command
4025 * Issues a FW command through the given mailbox to flush the
4026 * SGE context cache.
4028 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4031 struct fw_ldst_cmd c;
4033 memset(&c, 0, sizeof(c));
4034 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4036 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4037 c.cycles_to_len16 = htonl(FW_LEN16(c));
/* CTXTFLUSH flag in the idctxt sub-command requests the flush itself. */
4038 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
/* NOTE(review): the trailing "return ret;" and closing brace were dropped
 * from this capture (gaps 4041+). */
4040 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4045 * t4_sge_ctxt_rd - read an SGE context through FW
4046 * @adap: the adapter
4047 * @mbox: mailbox to use for the FW command
4048 * @cid: the context id
4049 * @ctype: the context type
4050 * @data: where to store the context data
4052 * Issues a FW command through the given mailbox to read an SGE context.
4054 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4055 enum ctxt_type ctype, u32 *data)
4058 struct fw_ldst_cmd c;
/* Map the context type to the corresponding LDST address space; `ret` is
 * reused first as the address-space code, then as the mailbox status. */
4060 if (ctype == CTXT_EGRESS)
4061 ret = FW_LDST_ADDRSPC_SGE_EGRC;
4062 else if (ctype == CTXT_INGRESS)
4063 ret = FW_LDST_ADDRSPC_SGE_INGC;
4064 else if (ctype == CTXT_FLM)
4065 ret = FW_LDST_ADDRSPC_SGE_FLMC;
4067 ret = FW_LDST_ADDRSPC_SGE_CONMC;
4069 memset(&c, 0, sizeof(c));
4070 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4071 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4072 c.cycles_to_len16 = htonl(FW_LEN16(c));
4073 c.u.idctxt.physid = htonl(cid);
/* NOTE(review): the "if (ret == 0)" guard around the six stores below and
 * the final "return ret;" were dropped from this capture (gaps 4076, 4083+).
 * The caller's @data buffer must hold at least 6 words. */
4075 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4077 data[0] = ntohl(c.u.idctxt.ctxt_data0);
4078 data[1] = ntohl(c.u.idctxt.ctxt_data1);
4079 data[2] = ntohl(c.u.idctxt.ctxt_data2);
4080 data[3] = ntohl(c.u.idctxt.ctxt_data3);
4081 data[4] = ntohl(c.u.idctxt.ctxt_data4);
4082 data[5] = ntohl(c.u.idctxt.ctxt_data5);
4088 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4089 * @adap: the adapter
4090 * @cid: the context id
4091 * @ctype: the context type
4092 * @data: where to store the context data
4094 * Reads an SGE context directly, bypassing FW. This is only for
4095 * debugging when FW is unavailable.
4097 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* NOTE(review): the "u32 *data)" continuation, opening brace and local
 * declarations were dropped from this capture (gaps 4098-4101). */
/* Kick off the direct context read, then poll BUSY until the hardware
 * has latched the result (up to 3 attempts, 1 us apart). */
4102 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4103 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
/* NOTE(review): an "if (!ret)" guard for the copy loop and the final
 * "return ret;" were dropped from this capture (gaps 4104, 4107+). */
4105 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4106 *data++ = t4_read_reg(adap, i);
4111 * t4_fw_hello - establish communication with FW
4112 * @adap: the adapter
4113 * @mbox: mailbox to use for the FW command
4114 * @evt_mbox: mailbox to receive async FW events
4115 * @master: specifies the caller's willingness to be the device master
4116 * @state: returns the current device state (if non-NULL)
4118 * Issues a command to establish communication with FW. Returns either
4119 * an error (negative integer) or the mailbox of the Master PF.
4121 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4122 enum dev_master master, enum dev_state *state)
4125 struct fw_hello_cmd c;
4127 unsigned int master_mbox;
4128 int retries = FW_CMD_HELLO_RETRIES;
/* NOTE(review): numerous framing lines of this function (opening brace,
 * the "retry:" label, sleep/retry plumbing, several if/else headers and
 * returns) were dropped from this capture — see the gaps in the embedded
 * numbering; consult the pristine source before modifying. */
4131 memset(&c, 0, sizeof(c));
4132 INIT_CMD(c, HELLO, WRITE);
/* Encode our mastership intent: MASTERDIS if we can't be master,
 * MASTERFORCE plus our own mailbox if we must be. */
4133 c.err_to_clearinit = htonl(
4134 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4135 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4136 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4137 M_FW_HELLO_CMD_MBMASTER) |
4138 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4139 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4140 F_FW_HELLO_CMD_CLEARINIT);
4143 * Issue the HELLO command to the firmware. If it's not successful
4144 * but indicates that we got a "busy" or "timeout" condition, retry
4145 * the HELLO until we exhaust our retry limit. If we do exceed our
4146 * retry limit, check to see if the firmware left us any error
4147 * information and report that if so ...
4149 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4150 if (ret != FW_SUCCESS) {
/* Retry target (a "goto retry;") was dropped in the capture (gap 4152). */
4151 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4153 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4154 t4_report_fw_error(adap);
/* Decode the reply: current master mailbox plus device state. */
4158 v = ntohl(c.err_to_clearinit);
4159 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
/* NOTE(review): an "if (state)" guard for the three stores below appears
 * to have been dropped (gap at 4160). */
4161 if (v & F_FW_HELLO_CMD_ERR)
4162 *state = DEV_STATE_ERR;
4163 else if (v & F_FW_HELLO_CMD_INIT)
4164 *state = DEV_STATE_INIT;
4166 *state = DEV_STATE_UNINIT;
4170 * If we're not the Master PF then we need to wait around for the
4171 * Master PF Driver to finish setting up the adapter.
4173 * Note that we also do this wait if we're a non-Master-capable PF and
4174 * there is no current Master PF; a Master PF may show up momentarily
4175 * and we wouldn't want to fail pointlessly. (This can happen when an
4176 * OS loads lots of different drivers rapidly at the same time). In
4177 * this case, the Master PF returned by the firmware will be
4178 * M_PCIE_FW_MASTER so the test below will work ...
4180 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4181 master_mbox != mbox) {
4182 int waiting = FW_CMD_HELLO_TIMEOUT;
4185 * Wait for the firmware to either indicate an error or
4186 * initialized state. If we see either of these we bail out
4187 * and report the issue to the caller. If we exhaust the
4188 * "hello timeout" and we haven't exhausted our retries, try
4189 * again. Otherwise bail with a timeout error.
4198 * If neither Error nor Initialialized are indicated
4199 * by the firmware keep waiting till we exhaust our
4200 * timeout ... and then retry if we haven't exhausted
4203 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
/* Wait-loop body (sleep, timeout decrement, retry/-ETIMEDOUT exits) was
 * dropped in the capture (gaps 4190-4197, 4205-4213). */
4204 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4215 * We either have an Error or Initialized condition
4216 * report errors preferentially.
4219 if (pcie_fw & F_PCIE_FW_ERR)
4220 *state = DEV_STATE_ERR;
4221 else if (pcie_fw & F_PCIE_FW_INIT)
4222 *state = DEV_STATE_INIT;
4226 * If we arrived before a Master PF was selected and
4227 * there's not a valid Master PF, grab its identity
4230 if (master_mbox == M_PCIE_FW_MASTER &&
4231 (pcie_fw & F_PCIE_FW_MASTER_VLD))
4232 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4241 * t4_fw_bye - end communication with FW
4242 * @adap: the adapter
4243 * @mbox: mailbox to use for the FW command
4245 * Issues a command to terminate communication with FW.
4247 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4249 struct fw_bye_cmd c;
/* BYE carries no payload beyond the common header; no reply is needed. */
4251 memset(&c, 0, sizeof(c));
4252 INIT_CMD(c, BYE, WRITE);
4253 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4257 * t4_fw_reset - issue a reset to FW
4258 * @adap: the adapter
4259 * @mbox: mailbox to use for the FW command
4260 * @reset: specifies the type of reset to perform
4262 * Issues a reset command of the specified type to FW.
4264 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4266 struct fw_reset_cmd c;
/* @reset is a PL_RST-style bitmask (e.g. F_PIORST | F_PIORSTMODE),
 * passed through to the firmware big-endian. */
4268 memset(&c, 0, sizeof(c));
4269 INIT_CMD(c, RESET, WRITE);
4270 c.val = htonl(reset);
4271 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4275 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4276 * @adap: the adapter
4277 * @mbox: mailbox to use for the FW RESET command (if desired)
4278 * @force: force uP into RESET even if FW RESET command fails
4280 * Issues a RESET command to firmware (if desired) with a HALT indication
4281 * and then puts the microprocessor into RESET state. The RESET command
4282 * will only be issued if a legitimate mailbox is provided (mbox <=
4283 * M_PCIE_FW_MASTER).
4285 * This is generally used in order for the host to safely manipulate the
4286 * adapter without fear of conflicting with whatever the firmware might
4287 * be doing. The only way out of this state is to RESTART the firmware
4290 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
/* NOTE(review): the opening brace and the declaration/initialization of
 * `ret` (presumably defaulted for the no-mailbox path) were dropped from
 * this capture (gaps 4291-4294). */
4295 * If a legitimate mailbox is provided, issue a RESET command
4296 * with a HALT indication.
4298 if (mbox <= M_PCIE_FW_MASTER) {
4299 struct fw_reset_cmd c;
4301 memset(&c, 0, sizeof(c));
4302 INIT_CMD(c, RESET, WRITE);
4303 c.val = htonl(F_PIORST | F_PIORSTMODE);
4304 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4305 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4309 * Normally we won't complete the operation if the firmware RESET
4310 * command fails but if our caller insists we'll go ahead and put the
4311 * uP into RESET. This can be useful if the firmware is hung or even
4312 * missing ... We'll have to take the risk of putting the uP into
4313 * RESET without the cooperation of firmware in that case.
4315 * We also force the firmware's HALT flag to be on in case we bypassed
4316 * the firmware RESET command above or we're dealing with old firmware
4317 * which doesn't have the HALT capability. This will serve as a flag
4318 * for the incoming firmware to know that it's coming out of a HALT
4319 * rather than a RESET ... if it's new enough to understand that ...
4321 if (ret == 0 || force) {
/* UPCRST holds the microprocessor in reset; PCIE_FW_HALT flags the halt
 * for the next firmware start. */
4322 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4323 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4327 * And we always return the result of the firmware RESET command
4328 * even when we force the uP into RESET ...
4334 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4335 * @adap: the adapter
4336 * @reset: if we want to do a RESET to restart things
4338 * Restart firmware previously halted by t4_fw_halt(). On successful
4339 * return the previous PF Master remains as the new PF Master and there
4340 * is no need to issue a new HELLO command, etc.
4342 * We do this in two ways:
4344 * 1. If we're dealing with newer firmware we'll simply want to take
4345 * the chip's microprocessor out of RESET. This will cause the
4346 * firmware to start up from its start vector. And then we'll loop
4347 * until the firmware indicates it's started again (PCIE_FW.HALT
4348 * reset to 0) or we timeout.
4350 * 2. If we're dealing with older firmware then we'll need to RESET
4351 * the chip since older firmware won't recognize the PCIE_FW.HALT
4352 * flag and automatically RESET itself on startup.
4354 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
/* NOTE(review): the opening brace and the "if (reset) {" framing around
 * the reset path below were dropped from this capture (gaps 4355-4356). */
4358 * Since we're directing the RESET instead of the firmware
4359 * doing it automatically, we need to clear the PCIE_FW.HALT
4362 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4365 * If we've been given a valid mailbox, first try to get the
4366 * firmware to do the RESET. If that works, great and we can
4367 * return success. Otherwise, if we haven't been given a
4368 * valid mailbox or the RESET command failed, fall back to
4369 * hitting the chip with a hammer.
4371 if (mbox <= M_PCIE_FW_MASTER) {
/* Release the uP first so it can process the RESET mailbox command. */
4372 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4374 if (t4_fw_reset(adap, mbox,
4375 F_PIORST | F_PIORSTMODE) == 0)
/* Hammer path: direct PL_RST chip reset without firmware cooperation.
 * NOTE(review): the "return 0;" after the successful t4_fw_reset() above
 * and the "} else {" for the non-reset (poll PCIE_FW.HALT) path were
 * dropped in this capture (gaps 4376-4378, 4380-4383). */
4379 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4384 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
/* Poll until firmware clears PCIE_FW.HALT or we run out of time; the
 * sleep/increment and final return lines were lost in the capture. */
4385 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4386 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4397 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4398 * @adap: the adapter
4399 * @mbox: mailbox to use for the FW RESET command (if desired)
4400 * @fw_data: the firmware image to write
4402 * @force: force upgrade even if firmware doesn't cooperate
4404 * Perform all of the steps necessary for upgrading an adapter's
4405 * firmware image. Normally this requires the cooperation of the
4406 * existing firmware in order to halt all existing activities
4407 * but if an invalid mailbox token is passed in we skip that step
4408 * (though we'll still put the adapter microprocessor into RESET in
4411 * On successful return the new firmware will have been loaded and
4412 * the adapter will have been fully RESET losing all previous setup
4413 * state. On unsuccessful return the adapter may be completely hosed ...
4414 * positive errno indicates that the adapter is ~probably~ intact, a
4415 * negative errno indicates that things are looking bad ...
4417 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4418 const u8 *fw_data, unsigned int size, int force)
4420 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Step 1: halt the running firmware (unless forcing past a failure). */
4423 ret = t4_fw_halt(adap, mbox, force);
4424 if (ret < 0 && !force)
/* NOTE(review): the "return ret;" bodies of both error checks here were
 * dropped from this capture (gaps 4425, 4428-4430). */
/* Step 2: write the new image to flash. */
4427 ret = t4_load_fw(adap, fw_data, size);
4432 * Older versions of the firmware don't understand the new
4433 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4434 * restart. So for newly loaded older firmware we'll have to do the
4435 * RESET for it so it starts up on a clean slate. We can tell if
4436 * the newly loaded firmware will handle this right by checking
4437 * its header flags to see if it advertises the capability.
4439 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4440 return t4_fw_restart(adap, mbox, reset);
4444 * t4_fw_initialize - ask FW to initialize the device
4445 * @adap: the adapter
4446 * @mbox: mailbox to use for the FW command
4448 * Issues a command to FW to partially initialize the device. This
4449 * performs initialization that generally doesn't depend on user input.
4451 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4453 struct fw_initialize_cmd c;
/* Zero the command, stamp it as an FW_INITIALIZE WRITE via INIT_CMD(), and
 * issue it over the mailbox.  No reply payload is expected (NULL rpl). */
4455 memset(&c, 0, sizeof(c));
4456 INIT_CMD(c, INITIALIZE, WRITE);
4457 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4461 * t4_query_params - query FW or device parameters
4462 * @adap: the adapter
4463 * @mbox: mailbox to use for the FW command
4466 * @nparams: the number of parameters
4467 * @params: the parameter names
4468 * @val: the parameter values
4470 * Reads the value of FW or device parameters. Up to 7 parameters can be
4473 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4474 unsigned int vf, unsigned int nparams, const u32 *params,
4478 struct fw_params_cmd c;
/* c.param[] is an array of {mnem, val} big-endian word pairs; p walks it
 * in steps of 2 so it only touches the mnemonic (or value) slots. */
4479 __be32 *p = &c.param[0].mnem;
4484 memset(&c, 0, sizeof(c));
4485 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4486 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4487 V_FW_PARAMS_CMD_VFN(vf));
4488 c.retval_len16 = htonl(FW_LEN16(c));
/* Load the parameter names to be queried. */
4490 for (i = 0; i < nparams; i++, p += 2)
4491 *p = htonl(*params++);
/* The reply is written back into c; harvest the returned values below. */
4493 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4495 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4501 * t4_set_params - sets FW or device parameters
4502 * @adap: the adapter
4503 * @mbox: mailbox to use for the FW command
4506 * @nparams: the number of parameters
4507 * @params: the parameter names
4508 * @val: the parameter values
4510 * Sets the value of FW or device parameters. Up to 7 parameters can be
4511 * specified at once.
4513 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4514 unsigned int vf, unsigned int nparams, const u32 *params,
4517 struct fw_params_cmd c;
/* c.param[] holds {mnem, val} pairs; p advances through both words of each
 * pair as the name/value stream is written. */
4518 __be32 *p = &c.param[0].mnem;
4523 memset(&c, 0, sizeof(c));
4524 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4525 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4526 V_FW_PARAMS_CMD_VFN(vf));
4527 c.retval_len16 = htonl(FW_LEN16(c));
/* Interleave each parameter name with its new value. */
4530 *p++ = htonl(*params++);
4531 *p++ = htonl(*val++);
/* WRITE command: no reply payload needed. */
4534 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4538 * t4_cfg_pfvf - configure PF/VF resource limits
4539 * @adap: the adapter
4540 * @mbox: mailbox to use for the FW command
4541 * @pf: the PF being configured
4542 * @vf: the VF being configured
4543 * @txq: the max number of egress queues
4544 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4545 * @rxqi: the max number of interrupt-capable ingress queues
4546 * @rxq: the max number of interruptless ingress queues
4547 * @tc: the PCI traffic class
4548 * @vi: the max number of virtual interfaces
4549 * @cmask: the channel access rights mask for the PF/VF
4550 * @pmask: the port access rights mask for the PF/VF
4551 * @nexact: the maximum number of exact MPS filters
4552 * @rcaps: read capabilities
4553 * @wxcaps: write/execute capabilities
4555 * Configures resource limits and capabilities for a physical or virtual
4558 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4559 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4560 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4561 unsigned int vi, unsigned int cmask, unsigned int pmask,
4562 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4564 struct fw_pfvf_cmd c;
/* Pack all resource limits/capabilities into one FW_PFVF_CMD WRITE. */
4566 memset(&c, 0, sizeof(c));
4567 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4568 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4569 V_FW_PFVF_CMD_VFN(vf));
4570 c.retval_len16 = htonl(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (NIQFLINT) vs interruptless (NIQ). */
4571 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4572 V_FW_PFVF_CMD_NIQ(rxq));
/* Channel/port access masks plus total egress queue limit. */
4573 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4574 V_FW_PFVF_CMD_PMASK(pmask) |
4575 V_FW_PFVF_CMD_NEQ(txq));
4576 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4577 V_FW_PFVF_CMD_NEXACTF(nexact));
4578 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4579 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4580 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4581 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4585 * t4_alloc_vi_func - allocate a virtual interface
4586 * @adap: the adapter
4587 * @mbox: mailbox to use for the FW command
4588 * @port: physical port associated with the VI
4589 * @pf: the PF owning the VI
4590 * @vf: the VF owning the VI
4591 * @nmac: number of MAC addresses needed (1 to 5)
4592 * @mac: the MAC addresses of the VI
4593 * @rss_size: size of RSS table slice associated with this VI
4594 * @portfunc: which Port Application Function MAC Address is desired
4595 * @idstype: Intrusion Detection Type
4597 * Allocates a virtual interface for the given physical port. If @mac is
4598 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4599 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4600 * stored consecutively so the space needed is @nmac * 6 bytes.
4601 * Returns a negative error number or the non-negative VI id.
4603 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4604 unsigned int port, unsigned int pf, unsigned int vf,
4605 unsigned int nmac, u8 *mac, unsigned int *rss_size,
4606 unsigned int portfunc, unsigned int idstype)
4611 memset(&c, 0, sizeof(c));
4612 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4613 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4614 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4615 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4616 c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4617 V_FW_VI_CMD_FUNC(portfunc));
4618 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
/* The firmware's reply (assigned VIID, MAC addresses, RSS slice size) is
 * written back into c. */
4621 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out the FW-assigned MACs; extra addresses are stored consecutively
 * at 6-byte strides (presumably gated on @nmac by code elided here). */
4626 memcpy(mac, c.mac, sizeof(c.mac));
4629 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4631 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4633 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4635 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4639 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
/* NOTE(review): c.type_to_viid holds a big-endian reply value, so this
 * should read ntohs(), not htons().  The two are the same byte swap on
 * every supported platform, so behavior is unaffected, but the intent is
 * network-to-host — confirm against upstream before changing. */
4640 return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
4644 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4645 * @adap: the adapter
4646 * @mbox: mailbox to use for the FW command
4647 * @port: physical port associated with the VI
4648 * @pf: the PF owning the VI
4649 * @vf: the VF owning the VI
4650 * @nmac: number of MAC addresses needed (1 to 5)
4651 * @mac: the MAC addresses of the VI
4652 * @rss_size: size of RSS table slice associated with this VI
4654 * Backwards-compatible convenience routine to allocate a Virtual
4655 * Interface with an Ethernet Port Application Function and Intrusion
4656 * Detection System disabled.
4658 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4659 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4660 unsigned int *rss_size)
/* Thin wrapper: delegate to t4_alloc_vi_func() with the default Ethernet
 * port-application function and IDS disabled (see doc comment above). */
4662 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4667 * t4_free_vi - free a virtual interface
4668 * @adap: the adapter
4669 * @mbox: mailbox to use for the FW command
4670 * @pf: the PF owning the VI
4671 * @vf: the VF owning the VI
4672 * @viid: virtual interface identifier
4674 * Free a previously allocated virtual interface.
4676 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4677 unsigned int vf, unsigned int viid)
4681 memset(&c, 0, sizeof(c));
4682 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4685 V_FW_VI_CMD_PFN(pf) |
4686 V_FW_VI_CMD_VFN(vf));
/* Same FW_VI_CMD as allocation, but with the FREE flag and the target
 * VIID filled in. */
4687 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4688 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
/* Reply is written back into c, though callers only use the return code. */
4690 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4694 * t4_set_rxmode - set Rx properties of a virtual interface
4695 * @adap: the adapter
4696 * @mbox: mailbox to use for the FW command
4698 * @mtu: the new MTU or -1
4699 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4700 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4701 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4702 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4703 * @sleep_ok: if true we may sleep while awaiting command completion
4705 * Sets Rx properties of a virtual interface.
4707 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4708 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4711 struct fw_vi_rxmode_cmd c;
4713 /* convert to FW values */
/* A caller-supplied -1 means "no change"; the firmware encodes that as the
 * field's all-ones mask value (presumably each assignment below sits under
 * an `if (arg < 0)` guard elided from this view — confirm in full source). */
4715 mtu = M_FW_VI_RXMODE_CMD_MTU;
4717 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4719 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4721 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4723 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4725 memset(&c, 0, sizeof(c));
4726 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4727 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4728 c.retval_len16 = htonl(FW_LEN16(c));
4729 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4730 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4731 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4732 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4733 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* t4_wr_mbox_meat() lets the caller choose sleeping vs busy-wait delivery. */
4734 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4738 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4739 * @adap: the adapter
4740 * @mbox: mailbox to use for the FW command
4742 * @free: if true any existing filters for this VI id are first removed
4743 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4744 * @addr: the MAC address(es)
4745 * @idx: where to store the index of each allocated filter
4746 * @hash: pointer to hash address filter bitmap
4747 * @sleep_ok: call is allowed to sleep
4749 * Allocates an exact-match filter for each of the supplied addresses and
4750 * sets it to the corresponding address. If @idx is not %NULL it should
4751 * have at least @naddr entries, each of which will be set to the index of
4752 * the filter allocated for the corresponding MAC address. If a filter
4753 * could not be allocated for an address its index is set to 0xffff.
4754 * If @hash is not %NULL addresses that fail to allocate an exact filter
4755 * are hashed and update the hash filter bitmap pointed at by @hash.
4757 * Returns a negative error number or the number of filters allocated.
4759 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4760 unsigned int viid, bool free, unsigned int naddr,
4761 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4763 int offset, ret = 0;
4764 struct fw_vi_mac_cmd c;
4765 unsigned int nfilters = 0;
4766 unsigned int rem = naddr;
/* Hard upper bound: can't ask for more filters than the MPS TCAM holds. */
4768 if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
/* Submit the addresses in chunks of up to ARRAY_SIZE(c.u.exact) per
 * mailbox command; offset tracks how many have been handled so far. */
4771 for (offset = 0; offset < naddr ; /**/) {
4772 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4774 : ARRAY_SIZE(c.u.exact));
/* Command length covers only the exact-match entries actually used. */
4775 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4776 u.exact[fw_naddr]), 16);
4777 struct fw_vi_mac_exact *p;
4780 memset(&c, 0, sizeof(c));
4781 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4784 V_FW_CMD_EXEC(free) |
4785 V_FW_VI_MAC_CMD_VIID(viid));
4786 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4787 V_FW_CMD_LEN16(len16));
/* FW_VI_MAC_ADD_MAC asks the firmware to pick a free filter index. */
4789 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4790 p->valid_to_idx = htons(
4791 F_FW_VI_MAC_CMD_VALID |
4792 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4793 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4797 * It's okay if we run out of space in our MAC address arena.
4798 * Some of the addresses we submit may get stored so we need
4799 * to run through the reply to see what the results were ...
4801 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4802 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: each entry now carries the index FW assigned (or an
 * out-of-range value meaning "no exact filter available"). */
4805 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4806 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4809 idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
4812 if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
/* Exact filter unavailable: fall back to the inexact hash filter. */
4815 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* -FW_ENOMEM is a partial success — report how many filters stuck. */
4823 if (ret == 0 || ret == -FW_ENOMEM)
4829 * t4_change_mac - modifies the exact-match filter for a MAC address
4830 * @adap: the adapter
4831 * @mbox: mailbox to use for the FW command
4833 * @idx: index of existing filter for old value of MAC address, or -1
4834 * @addr: the new MAC address value
4835 * @persist: whether a new MAC allocation should be persistent
4836 * @add_smt: if true also add the address to the HW SMT
4838 * Modifies an exact-match filter and sets it to the new MAC address if
4839 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4840 * latter case the address is added persistently if @persist is %true.
4842 * Note that in general it is not possible to modify the value of a given
4843 * filter so the generic way to modify an address filter is to free the one
4844 * being used by the old address value and allocate a new filter for the
4845 * new address value.
4847 * Returns a negative error number or the index of the filter with the new
4848 * MAC value. Note that this index may differ from @idx.
4850 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4851 int idx, const u8 *addr, bool persist, bool add_smt)
4854 struct fw_vi_mac_cmd c;
4855 struct fw_vi_mac_exact *p = c.u.exact;
/* idx < 0 requests a fresh filter; the magic ADD_* index values tell the
 * firmware to allocate one (persistently, if asked). */
4857 if (idx < 0) /* new allocation */
4858 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4859 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4861 memset(&c, 0, sizeof(c));
4862 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4863 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
/* Single exact-match entry, hence LEN16(1). */
4864 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4865 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4866 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4867 V_FW_VI_MAC_CMD_IDX(idx));
4868 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* _ns variant: non-sleeping mailbox write; reply lands back in c. */
4870 ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
/* On success the firmware reports the (possibly different) filter index. */
4872 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4873 if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4880 * t4_set_addr_hash - program the MAC inexact-match hash filter
4881 * @adap: the adapter
4882 * @mbox: mailbox to use for the FW command
4884 * @ucast: whether the hash filter should also match unicast addresses
4885 * @vec: the value to be written to the hash filter
4886 * @sleep_ok: call is allowed to sleep
4888 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4890 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4891 bool ucast, u64 vec, bool sleep_ok)
4893 struct fw_vi_mac_cmd c;
4895 memset(&c, 0, sizeof(c));
4896 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4897 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4898 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4899 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4901 c.u.hash.hashvec = cpu_to_be64(vec);
4902 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4906 * t4_enable_vi - enable/disable a virtual interface
4907 * @adap: the adapter
4908 * @mbox: mailbox to use for the FW command
4910 * @rx_en: 1=enable Rx, 0=disable Rx
4911 * @tx_en: 1=enable Tx, 0=disable Tx
4913 * Enables/disables a virtual interface.
4915 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4916 bool rx_en, bool tx_en)
4918 struct fw_vi_enable_cmd c;
4920 memset(&c, 0, sizeof(c));
4921 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4922 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
/* IEN = ingress (Rx) enable, EEN = egress (Tx) enable. */
4923 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4924 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4925 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4929 * t4_identify_port - identify a VI's port by blinking its LED
4930 * @adap: the adapter
4931 * @mbox: mailbox to use for the FW command
4933 * @nblinks: how many times to blink LED at 2.5 Hz
4935 * Identifies a VI's port by blinking its LED.
4937 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4938 unsigned int nblinks)
4940 struct fw_vi_enable_cmd c;
4942 memset(&c, 0, sizeof(c));
4943 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4944 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
/* LED flag repurposes the VI-enable command for port identification. */
4945 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4946 c.blinkdur = htons(nblinks);
4947 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4951 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4952 * @adap: the adapter
4953 * @mbox: mailbox to use for the FW command
4954 * @start: %true to enable the queues, %false to disable them
4955 * @pf: the PF owning the queues
4956 * @vf: the VF owning the queues
4957 * @iqid: ingress queue id
4958 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4959 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4961 * Starts or stops an ingress queue and its associated FLs, if any.
4963 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4964 unsigned int pf, unsigned int vf, unsigned int iqid,
4965 unsigned int fl0id, unsigned int fl1id)
4969 memset(&c, 0, sizeof(c));
4970 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4971 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4972 V_FW_IQ_CMD_VFN(vf));
/* Exactly one of START/STOP is set, driven by @start. */
4973 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4974 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
/* 0xffff in fl0id/fl1id means "no free list attached" (see doc above). */
4975 c.iqid = htons(iqid);
4976 c.fl0id = htons(fl0id);
4977 c.fl1id = htons(fl1id);
4978 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4982 * t4_iq_free - free an ingress queue and its FLs
4983 * @adap: the adapter
4984 * @mbox: mailbox to use for the FW command
4985 * @pf: the PF owning the queues
4986 * @vf: the VF owning the queues
4987 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4988 * @iqid: ingress queue id
4989 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4990 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4992 * Frees an ingress queue and its associated FLs, if any.
4994 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4995 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4996 unsigned int fl0id, unsigned int fl1id)
5000 memset(&c, 0, sizeof(c));
5001 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5002 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5003 V_FW_IQ_CMD_VFN(vf));
5004 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
/* The queue type must match what was used at allocation time. */
5005 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5006 c.iqid = htons(iqid);
5007 c.fl0id = htons(fl0id);
5008 c.fl1id = htons(fl1id);
5009 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5013 * t4_eth_eq_free - free an Ethernet egress queue
5014 * @adap: the adapter
5015 * @mbox: mailbox to use for the FW command
5016 * @pf: the PF owning the queue
5017 * @vf: the VF owning the queue
5018 * @eqid: egress queue id
5020 * Frees an Ethernet egress queue.
5022 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5023 unsigned int vf, unsigned int eqid)
5025 struct fw_eq_eth_cmd c;
/* Issue an FW_EQ_ETH_CMD with the FREE flag for the given egress queue. */
5027 memset(&c, 0, sizeof(c));
5028 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5029 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5030 V_FW_EQ_ETH_CMD_VFN(vf));
5031 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5032 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5033 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5037 * t4_ctrl_eq_free - free a control egress queue
5038 * @adap: the adapter
5039 * @mbox: mailbox to use for the FW command
5040 * @pf: the PF owning the queue
5041 * @vf: the VF owning the queue
5042 * @eqid: egress queue id
5044 * Frees a control egress queue.
5046 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5047 unsigned int vf, unsigned int eqid)
5049 struct fw_eq_ctrl_cmd c;
/* Same shape as t4_eth_eq_free(), but for a control egress queue. */
5051 memset(&c, 0, sizeof(c));
5052 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5053 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5054 V_FW_EQ_CTRL_CMD_VFN(vf));
5055 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5056 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5057 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5061 * t4_ofld_eq_free - free an offload egress queue
5062 * @adap: the adapter
5063 * @mbox: mailbox to use for the FW command
5064 * @pf: the PF owning the queue
5065 * @vf: the VF owning the queue
5066 * @eqid: egress queue id
5068 * Frees an offload egress queue.
5070 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5071 unsigned int vf, unsigned int eqid)
5073 struct fw_eq_ofld_cmd c;
/* Same shape as t4_eth_eq_free(), but for an offload egress queue. */
5075 memset(&c, 0, sizeof(c));
5076 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5077 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5078 V_FW_EQ_OFLD_CMD_VFN(vf));
5079 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5080 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5081 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5085 * t4_handle_fw_rpl - process a FW reply message
5086 * @adap: the adapter
5087 * @rpl: start of the FW message
5089 * Processes a FW message, such as link state change messages.
5091 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The opcode is always the first byte of any firmware message. */
5093 u8 opcode = *(const u8 *)rpl;
5094 const struct fw_port_cmd *p = (const void *)rpl;
5095 unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5097 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5098 /* link/module state change message */
5099 int speed = 0, fc = 0, i;
5100 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5101 struct port_info *pi = NULL;
5102 struct link_config *lc;
5103 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5104 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5105 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* Decode pause (flow-control) and link-speed bits from the status word. */
5107 if (stat & F_FW_PORT_CMD_RXPAUSE)
5109 if (stat & F_FW_PORT_CMD_TXPAUSE)
5111 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5113 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5115 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5116 speed = SPEED_10000;
/* Map the firmware channel id back to the driver's port_info. */
5118 for_each_port(adap, i) {
5119 pi = adap2pinfo(adap, i);
5120 if (pi->tx_chan == chan)
/* Only notify the OS layer when link state actually changed. */
5125 if (link_ok != lc->link_ok || speed != lc->speed ||
5126 fc != lc->fc) { /* something changed */
5127 lc->link_ok = link_ok;
5130 t4_os_link_changed(adap, i, link_ok);
/* Transceiver module swapped (or removed): tell the OS layer too. */
5132 if (mod != pi->mod_type) {
5134 t4_os_portmod_changed(adap, i);
/* Any other message type is unexpected here; warn (rate-limited). */
5137 CH_WARN_RATELIMIT(adap,
5138 "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5145 * get_pci_mode - determine a card's PCI mode
5146 * @adapter: the adapter
5147 * @p: where to store the PCI settings
5149 * Determines a card's PCI mode and associated parameters, such as speed
5152 static void __devinit get_pci_mode(struct adapter *adapter,
5153 struct pci_params *p)
/* Locate the PCI Express capability and read the Link Status register. */
5158 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5160 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5161 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* Negotiated Link Width field starts at bit 4 of LNKSTA, hence the shift. */
5162 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5167 * init_link_config - initialize a link's SW state
5168 * @lc: structure holding the link state
5169 * @caps: link capabilities
5171 * Initializes the SW state maintained for each link, including the link's
5172 * capabilities and default speed/flow-control/autonegotiation settings.
5174 static void __devinit init_link_config(struct link_config *lc,
5177 lc->supported = caps;
5178 lc->requested_speed = 0;
/* Default to symmetric pause (Rx+Tx flow control). */
5180 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5181 if (lc->supported & FW_PORT_CAP_ANEG) {
/* Autoneg-capable link: advertise everything we support and let
 * autonegotiation also resolve pause settings. */
5182 lc->advertising = lc->supported & ADVERT_MASK;
5183 lc->autoneg = AUTONEG_ENABLE;
5184 lc->requested_fc |= PAUSE_AUTONEG;
5186 lc->advertising = 0;
5187 lc->autoneg = AUTONEG_DISABLE;
5191 static int __devinit wait_dev_ready(struct adapter *adap)
/* PL_WHOAMI reads back 0xffffffff while PCIe reads are failing, and
 * CIM_PF_NOACCESS while the chip isn't ready to talk to this function. */
5195 whoami = t4_read_reg(adap, A_PL_WHOAMI);
5197 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
/* Not ready on the first read: retry once (after a delay elided here). */
5201 whoami = t4_read_reg(adap, A_PL_WHOAMI);
5202 return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
5206 static int __devinit get_flash_params(struct adapter *adapter)
/* Issue the serial-flash Read-ID opcode and pull back 3 ID bytes. */
5211 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5213 ret = sf1_read(adapter, 3, 0, 1, &info);
5214 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Manufacturer ID 0x20 identifies the expected Numonix part. */
5218 if ((info & 0xff) != 0x20) /* not a Numonix flash */
5220 info >>= 16; /* log2 of size */
/* Derive the sector count from the device size: 1MB..8MB parts use
 * 64KB sectors (size/64KB sectors); the 16MB part reports 64 sectors. */
5221 if (info >= 0x14 && info < 0x18)
5222 adapter->params.sf_nsec = 1 << (info - 16)
5223 else if (info == 0x18)
5224 adapter->params.sf_nsec = 64;
5227 adapter->params.sf_size = 1 << info;
5231 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
/* Read-modify-write the PCIe Device Control 2 register to set the
 * completion-timeout range encoding passed in by the caller. */
5237 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5239 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5242 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5247 * t4_prep_adapter - prepare SW and HW for operation
5248 * @adapter: the adapter
5249 * @reset: if true perform a HW reset
5251 * Initialize adapter SW state for the various HW modules, set initial
5252 * values for some adapter tunables, take PHYs out of reset, and
5253 * initialize the MDIO interface.
5255 int __devinit t4_prep_adapter(struct adapter *adapter)
/* Wait for the chip to come out of reset before touching anything else. */
5259 ret = wait_dev_ready(adapter);
5263 get_pci_mode(adapter, &adapter->params.pci);
5265 adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
5266 /* T4A1 chip is no longer supported */
5267 if (adapter->params.rev == 1) {
5268 CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
5271 adapter->params.pci.vpd_cap_addr =
5272 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
/* Discover serial-flash geometry, then read adapter VPD (serial #, cclk). */
5274 ret = get_flash_params(adapter);
5278 ret = get_vpd_params(adapter, &adapter->params.vpd);
/* Newer PCIe revisions double the CIM logic-analyzer capture size. */
5282 if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
5284 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5287 adapter->params.cim_la_size = CIMLA_SIZE;
5290 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5293 * Default port and clock for debugging in case we can't reach FW.
5295 adapter->params.nports = 1;
5296 adapter->params.portvec = 1;
5297 adapter->params.vpd.cclk = 50000;
5299 /* Set pci completion timeout value to 4 seconds. */
5300 set_pcie_completion_timeout(adapter, 0xd);
5304 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5308 struct fw_port_cmd c;
5309 unsigned int rss_size;
5310 adapter_t *adap = p->adapter;
5312 memset(&c, 0, sizeof(c));
5314 for (i = 0, j = -1; i <= p->port_id; i++) {
5317 } while ((adap->params.portvec & (1 << j)) == 0);
5320 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5321 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5322 V_FW_PORT_CMD_PORTID(j));
5323 c.action_to_len16 = htonl(
5324 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5326 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5330 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5337 p->rss_size = rss_size;
5338 t4_os_set_hw_addr(adap, p->port_id, addr);
5340 ret = ntohl(c.u.info.lstatus_to_modtype);
5341 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5342 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5343 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5344 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5346 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));