2 * Copyright (c) 2012 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
/* Sample the register once per poll iteration. */
64 u32 val = t4_read_reg(adapter, reg);
/* !! collapses the masked field to 0/1 so it compares cleanly with @polarity. */
66 if (!!(val & mask) == polarity) {
79 * t4_set_reg_field - set a register field to a value
80 * @adapter: the adapter to program
81 * @addr: the register address
82 * @mask: specifies the portion of the register to modify
83 * @val: the new value for the register field
85 * Sets a register field specified by the supplied mask to the
88 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the field bits, then OR in the new value. */
91 u32 v = t4_read_reg(adapter, addr) & ~mask;
93 t4_write_reg(adapter, addr, v | val);
/* Read back to flush the posted write to the device. */
94 (void) t4_read_reg(adapter, addr); /* flush */
98 * t4_read_indirect - read indirectly addressed registers
100 * @addr_reg: register holding the indirect address
101 * @data_reg: register holding the value of the indirect register
102 * @vals: where the read register values are stored
103 * @nregs: how many indirect registers to read
104 * @start_idx: index of first indirect register to read
106 * Reads registers that are accessed indirectly through an address/data
109 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
110 unsigned int data_reg, u32 *vals, unsigned int nregs,
111 unsigned int start_idx)
/* Select the indirect register by writing its index to the address reg. */
114 t4_write_reg(adap, addr_reg, start_idx);
/* The selected register's value is then visible through the data reg. */
115 *vals++ = t4_read_reg(adap, data_reg);
121 * t4_write_indirect - write indirectly addressed registers
123 * @addr_reg: register holding the indirect addresses
124 * @data_reg: register holding the value for the indirect registers
125 * @vals: values to write
126 * @nregs: how many indirect registers to write
127 * @start_idx: address of first indirect register to write
129 * Writes a sequential block of registers that are accessed indirectly
130 * through an address/data register pair.
132 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
133 unsigned int data_reg, const u32 *vals,
134 unsigned int nregs, unsigned int start_idx)
/* Select the target index, then write its value through the data reg. */
137 t4_write_reg(adap, addr_reg, start_idx++);
138 t4_write_reg(adap, data_reg, *vals++);
143 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
144 * mechanism. This guarantees that we get the real value even if we're
145 * operating within a Virtual Machine and the Hypervisor is trapping our
146 * Configuration Space accesses.
148 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
/* Post the config-space request for our PF; the result appears in
 * A_PCIE_CFG_SPACE_DATA. */
150 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
151 F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
153 return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
157 * t4_report_fw_error - report firmware error
160 * The adapter firmware can indicate error conditions to the host.
161 * This routine prints out the reason for the firmware error (as
162 * reported by the firmware).
164 static void t4_report_fw_error(struct adapter *adap)
/* Reason strings indexed by the G_PCIE_FW_EVAL field of A_PCIE_FW. */
166 static const char *reason[] = {
167 "Crash", /* PCIE_FW_EVAL_CRASH */
168 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
169 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
170 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
171 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
172 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
173 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
174 "Reserved", /* reserved */
/* Only log when the firmware has actually flagged an error. */
178 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
179 if (pcie_fw & F_PCIE_FW_ERR)
180 CH_ERR(adap, "Firmware reports adapter error: %s\n",
181 reason[G_PCIE_FW_EVAL(pcie_fw)]);
185 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
187 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits from the mailbox, converting each to BE. */
190 for ( ; nflit; nflit--, mbox_addr += 8)
191 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
195 * Handle a FW assertion reported in a mailbox.
197 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
199 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log its
 * assertion details (file, line, and the two diagnostic values). */
201 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
202 CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
203 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
204 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
207 #define X_CIM_PF_NOACCESS 0xeeeeeeee
209 * t4_wr_mbox_meat - send a command to FW through the given mailbox
211 * @mbox: index of the mailbox to use
212 * @cmd: the command to write
213 * @size: command length in bytes
214 * @rpl: where to optionally store the reply
215 * @sleep_ok: if true we may sleep while awaiting command completion
217 * Sends the given command to FW through the selected mailbox and waits
218 * for the FW to execute the command. If @rpl is not %NULL it is used to
219 * store the FW's reply to the command. The command and its optional
220 * reply are of the same length. Some FW commands like RESET and
221 * INITIALIZE can take a considerable amount of time to execute.
222 * @sleep_ok determines whether we may sleep while awaiting the response.
223 * If sleeping is allowed we use progressive backoff otherwise we spin.
225 * The return value is 0 on success or a negative errno on failure. A
226 * failure can happen either because we are not able to execute the
227 * command or FW executes it but signals an error. In the latter case
228 * the return value is the error code indicated by FW (negated).
230 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
231 void *rpl, bool sleep_ok)
234 * We delay in small increments at first in an effort to maintain
235 * responsiveness for simple, fast executing commands but then back
236 * off to larger delays to a maximum retry delay.
238 static const int delay[] = {
239 1, 1, 3, 5, 10, 10, 20, 50, 100
244 int i, ms, delay_idx;
245 const __be64 *p = cmd;
246 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
247 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
/* Commands must be a multiple of 16 bytes and fit the mailbox. */
249 if ((size & 15) || size > MBOX_LEN)
/* Wait (briefly) for mailbox ownership to settle before claiming it. */
252 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
253 for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
254 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
256 if (v != X_MBOWNER_PL)
257 return v ? -EBUSY : -ETIMEDOUT;
/* Copy the command into the mailbox data registers, 8 bytes at a time. */
259 for (i = 0; i < size; i += 8, p++)
260 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
262 CH_DUMP_MBOX(adap, mbox, data_reg);
/* Hand the mailbox to the firmware and flush the posted write. */
264 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
265 t4_read_reg(adap, ctl_reg); /* flush write */
/* Poll for completion with progressive backoff up to the FW timeout. */
270 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
272 ms = delay[delay_idx]; /* last element may repeat */
273 if (delay_idx < ARRAY_SIZE(delay) - 1)
279 v = t4_read_reg(adap, ctl_reg);
280 if (v == X_CIM_PF_NOACCESS)
282 if (G_MBOWNER(v) == X_MBOWNER_PL) {
/* Ownership came back without a valid message: release and retry. */
283 if (!(v & F_MBMSGVALID)) {
284 t4_write_reg(adap, ctl_reg,
285 V_MBOWNER(X_MBOWNER_NONE));
289 CH_DUMP_MBOX(adap, mbox, data_reg);
291 res = t4_read_reg64(adap, data_reg);
/* A FW_DEBUG_CMD in the reply slot signals a firmware assertion. */
292 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
293 fw_asrt(adap, data_reg);
294 res = V_FW_CMD_RETVAL(EIO);
296 get_mbox_rpl(adap, rpl, size / 8, data_reg);
297 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
/* Negate the FW retval so 0 means success, -errno means failure. */
298 return -G_FW_CMD_RETVAL((int)res);
303 * We timed out waiting for a reply to our mailbox command. Report
304 * the error and also check to see if the firmware reported any
307 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
308 *(const u8 *)cmd, mbox);
309 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
310 t4_report_fw_error(adap);
315 * t4_mc_read - read from MC through backdoor accesses
317 * @idx: which MC to access
318 * @addr: address of first byte requested
319 * @data: 64 bytes of data containing the requested address
320 * @ecc: where to store the corresponding 64-bit ECC word
322 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
323 * that covers the requested address @addr. If @ecc is not %NULL it
324 * is assigned the 64-bit ECC word for the read data.
326 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
329 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
330 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
/* T4 has a single MC with fixed BIST register addresses. */
333 mc_bist_cmd_reg = A_MC_BIST_CMD;
334 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
335 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
336 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
337 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
/* Later chips have per-MC register copies selected by @idx. */
339 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
340 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
341 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
342 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
344 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
/* Bail if a BIST operation is already in flight. */
348 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
/* Program a 64-byte-aligned, 64-byte backdoor read and kick it off. */
350 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
351 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
352 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
353 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
354 F_START_BIST | V_BIST_CMD_GAP(1));
355 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
359 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
/* Copy out the 16 data words (reverse order) and the ECC word. */
361 for (i = 15; i >= 0; i--)
362 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
364 *ecc = t4_read_reg64(adap, MC_DATA(16));
370 * t4_edc_read - read from EDC through backdoor accesses
372 * @idx: which EDC to access
373 * @addr: address of first byte requested
374 * @data: 64 bytes of data containing the requested address
375 * @ecc: where to store the corresponding 64-bit ECC word
377 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
378 * that covers the requested address @addr. If @ecc is not %NULL it
379 * is assigned the 64-bit ECC word for the read data.
381 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
384 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
385 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
/* T4 EDC BIST registers are addressed with the EDC_REG stride. */
388 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
389 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
390 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
391 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
393 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
397 * These macros are missing in t4_regs.h file.
398 * Added temporarily for testing.
400 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
401 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
/* T5 uses a different base/stride for its EDC BIST registers. */
402 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
403 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
404 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
405 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
407 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
/* Bail if a BIST operation is already in flight. */
413 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
/* Program a 64-byte-aligned, 64-byte backdoor read and kick it off. */
415 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
416 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
417 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
418 t4_write_reg(adap, edc_bist_cmd_reg,
419 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
420 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
424 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
/* Copy out the 16 data words (reverse order) and the ECC word. */
426 for (i = 15; i >= 0; i--)
427 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
429 *ecc = t4_read_reg64(adap, EDC_DATA(16));
435 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
437 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
438 * @addr: address within indicated memory type
439 * @len: amount of memory to read
440 * @buf: host memory buffer
442 * Reads an [almost] arbitrary memory region in the firmware: the
443 * firmware memory address, length and host buffer must be aligned on
444 * 32-bit boundaries. The memory is returned as a raw byte sequence from
445 * the firmware's memory. If this memory contains data structures which
446 * contain multi-byte integers, it's the caller's responsibility to
447 * perform appropriate byte order conversions.
449 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
452 u32 pos, start, end, offset;
456 * Argument sanity checks ...
458 if ((addr & 0x3) || (len & 0x3))
462 * The underlying EDC/MC read routines read 64 bytes at a time so we
463 * need to round down the start and round up the end. We'll start
464 * copying out of the first line at (addr - start) a word at a time.
466 start = addr & ~(64-1);
467 end = (addr + len + 64-1) & ~(64-1);
468 offset = (addr - start)/sizeof(__be32);
470 for (pos = start; pos < end; pos += 64, offset = 0) {
474 * Read the chip's memory block and bail if there's an error.
476 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
477 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
479 ret = t4_edc_read(adap, mtype, pos, data, NULL);
484 * Copy the data into the caller's memory buffer.
486 while (offset < 16 && len > 0) {
487 *buf++ = data[offset++];
488 len -= sizeof(__be32);
496 * Partial EEPROM Vital Product Data structure. Includes only the ID and
508 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
510 #define EEPROM_MAX_RD_POLL 40
511 #define EEPROM_MAX_WR_POLL 6
512 #define EEPROM_STAT_ADDR 0x7bfc
513 #define VPD_BASE 0x400
514 #define VPD_BASE_OLD 0
516 #define VPD_INFO_FLD_HDR_SIZE 3
517 #define CHELSIO_VPD_UNIQUE_ID 0x82
520 * t4_seeprom_read - read a serial EEPROM location
521 * @adapter: adapter to read
522 * @addr: EEPROM virtual address
523 * @data: where to store the read data
525 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
526 * VPD capability. Note that this function must be called with a virtual
529 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
532 int attempts = EEPROM_MAX_RD_POLL;
533 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Address must be in range and 32-bit aligned. */
535 if (addr >= EEPROMVSIZE || (addr & 3))
/* Write the VPD address (flag clear = read) and poll until the
 * completion flag is set or we run out of attempts. */
538 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
541 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
542 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
544 if (!(val & PCI_VPD_ADDR_F)) {
545 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
/* VPD data is little-endian; convert to host order for the caller. */
548 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
549 *data = le32_to_cpu(*data);
554 * t4_seeprom_write - write a serial EEPROM location
555 * @adapter: adapter to write
556 * @addr: virtual EEPROM address
557 * @data: value to write
559 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
560 * VPD capability. Note that this function must be called with a virtual
563 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
566 int attempts = EEPROM_MAX_WR_POLL;
567 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Address must be in range and 32-bit aligned. */
569 if (addr >= EEPROMVSIZE || (addr & 3))
/* Write the data first, then the address with the flag set (= write),
 * and poll until hardware clears the flag to signal completion. */
572 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
574 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
575 (u16)addr | PCI_VPD_ADDR_F)
578 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
579 } while ((val & PCI_VPD_ADDR_F) && --attempts);
581 if (val & PCI_VPD_ADDR_F) {
582 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
589 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
590 * @phys_addr: the physical EEPROM address
591 * @fn: the PCI function number
592 * @sz: size of function-specific area
594 * Translate a physical EEPROM address to virtual. The first 1K is
595 * accessed through virtual addresses starting at 31K, the rest is
596 * accessed through virtual addresses starting at 0.
598 * The mapping is as follows:
599 * [0..1K) -> [31K..32K)
600 * [1K..1K+A) -> [ES-A..ES)
601 * [1K+A..ES) -> [0..ES-A-1K)
603 * where A = @fn * @sz, and ES = EEPROM size.
605 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
608 if (phys_addr < 1024)
609 return phys_addr + (31 << 10);
610 if (phys_addr < 1024 + fn)
611 return EEPROMSIZE - fn + phys_addr - 1024;
612 if (phys_addr < EEPROMSIZE)
613 return phys_addr - 1024 - fn;
618 * t4_seeprom_wp - enable/disable EEPROM write protection
619 * @adapter: the adapter
620 * @enable: whether to enable or disable write protection
622 * Enables or disables write protection on the serial EEPROM.
624 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* 0xc in the status word asserts write protection; 0 clears it. */
626 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
630 * get_vpd_keyword_val - Locates an information field keyword in the VPD
631 * @v: Pointer to buffered vpd data structure
632 * @kw: The keyword to search for
634 * Returns the value of the information field keyword or
637 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
640 unsigned int offset , len;
641 const u8 *buf = &v->id_tag;
642 const u8 *vpdr_len = &v->vpdr_tag;
643 offset = sizeof(struct t4_vpd_hdr);
/* VPD-R length is a 16-bit little-endian value after the tag byte. */
644 len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
/* Reject a VPD-R section that claims to extend past the buffer. */
646 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
/* Walk the fields: each has a 2-byte keyword, 1-byte length, then data. */
650 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
651 if(memcmp(buf + i , kw , 2) == 0){
652 i += VPD_INFO_FLD_HDR_SIZE;
/* Not a match: skip this field's header plus its data bytes. */
656 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
664 * get_vpd_params - read VPD parameters from VPD EEPROM
665 * @adapter: adapter to read
666 * @p: where to store the parameters
668 * Reads card parameters stored in VPD EEPROM.
670 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
674 u8 vpd[VPD_LEN], csum;
675 const struct t4_vpd_hdr *v;
678 * Card information normally starts at VPD_BASE but early cards had
/* Probe VPD_BASE first; the unique-ID tag tells us which layout this is. */
681 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
682 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Read the whole VPD region a 32-bit word at a time. */
684 for (i = 0; i < sizeof(vpd); i += 4) {
685 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
689 v = (const struct t4_vpd_hdr *)vpd;
/* Helper: locate keyword @name or fail the whole routine. */
691 #define FIND_VPD_KW(var,name) do { \
692 var = get_vpd_keyword_val(v , name); \
694 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* Verify the checksum stored under the "RV" keyword covers the data. */
699 FIND_VPD_KW(i, "RV");
700 for (csum = 0; i >= 0; i--)
704 CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Locate the standard identification fields. */
707 FIND_VPD_KW(ec, "EC");
708 FIND_VPD_KW(sn, "SN");
709 FIND_VPD_KW(pn, "PN");
710 FIND_VPD_KW(na, "NA");
/* Copy each field into @p, bounded by the field's stored length. */
713 memcpy(p->id, v->id_data, ID_LEN);
715 memcpy(p->ec, vpd + ec, EC_LEN);
717 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
718 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
720 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
721 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
722 strstrip((char *)p->pn);
723 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
724 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
725 strstrip((char *)p->na);
730 /* serial flash and firmware constants and flash config file constants */
732 SF_ATTEMPTS = 10, /* max retries for SF operations */
734 /* flash command opcodes */
735 SF_PROG_PAGE = 2, /* program page */
736 SF_WR_DISABLE = 4, /* disable writes */
737 SF_RD_STATUS = 5, /* read status register */
738 SF_WR_ENABLE = 6, /* enable writes */
739 SF_RD_DATA_FAST = 0xb, /* read flash */
740 SF_RD_ID = 0x9f, /* read ID */
741 SF_ERASE_SECTOR = 0xd8, /* erase sector */
745 * sf1_read - read data from the serial flash
746 * @adapter: the adapter
747 * @byte_cnt: number of bytes to read
748 * @cont: whether another operation will be chained
749 * @lock: whether to lock SF for PL access only
750 * @valp: where to store the read data
752 * Reads up to 4 bytes of data from the serial flash. The location of
753 * the read needs to be specified prior to calling this by issuing the
754 * appropriate commands to the serial flash.
756 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* Only 1-4 byte transfers are supported by the SF_OP interface. */
761 if (!byte_cnt || byte_cnt > 4)
/* The flash controller must be idle before starting a new op. */
763 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Kick off the read (BYTECNT is encoded as count-1) and wait for it. */
765 t4_write_reg(adapter, A_SF_OP,
766 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
767 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
769 *valp = t4_read_reg(adapter, A_SF_DATA);
774 * sf1_write - write data to the serial flash
775 * @adapter: the adapter
776 * @byte_cnt: number of bytes to write
777 * @cont: whether another operation will be chained
778 * @lock: whether to lock SF for PL access only
779 * @val: value to write
781 * Writes up to 4 bytes of data to the serial flash. The location of
782 * the write needs to be specified prior to calling this by issuing the
783 * appropriate commands to the serial flash.
785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* Only 1-4 byte transfers are supported by the SF_OP interface. */
788 if (!byte_cnt || byte_cnt > 4)
/* The flash controller must be idle before starting a new op. */
790 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data, start the write (V_OP(1)), and wait for completion. */
792 t4_write_reg(adapter, A_SF_DATA, val);
793 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
794 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
799 * flash_wait_op - wait for a flash operation to complete
800 * @adapter: the adapter
801 * @attempts: max number of polls of the status register
802 * @delay: delay between polls in ms
804 * Wait for a flash operation to complete by polling the status register.
806 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RDSR (read status register) and read back one status byte. */
812 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
813 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
825 * t4_read_flash - read words from serial flash
826 * @adapter: the adapter
827 * @addr: the start address for the read
828 * @nwords: how many 32-bit words to read
829 * @data: where to store the read data
830 * @byte_oriented: whether to store data as bytes or as words
832 * Read the specified number of 32-bit words from the serial flash.
833 * If @byte_oriented is set the read data is stored as a byte array
834 * (i.e., big-endian), otherwise as 32-bit words in the platform's
837 int t4_read_flash(struct adapter *adapter, unsigned int addr,
838 unsigned int nwords, u32 *data, int byte_oriented)
/* Range/alignment check: read must fit in flash and be word-aligned. */
842 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Build the FAST_READ command: opcode in the low byte, address above. */
845 addr = swab32(addr) | SF_RD_DATA_FAST;
847 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
848 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Stream out the words; the final read drops CONT and unlocks on error. */
851 for ( ; nwords; nwords--, data++) {
852 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
854 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* byte_oriented: keep the flash's big-endian byte order in memory. */
858 *data = htonl(*data);
864 * t4_write_flash - write up to a page of data to the serial flash
865 * @adapter: the adapter
866 * @addr: the start address to write
867 * @n: length of data to write in bytes
868 * @data: the data to write
869 * @byte_oriented: whether to store data as bytes or as words
871 * Writes up to a page of data (256 bytes) to the serial flash starting
872 * at the given address. All the data must be written to the same page.
873 * If @byte_oriented is set the write data is stored as byte stream
874 * (i.e. matches what on disk), otherwise in big-endian.
876 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
877 unsigned int n, const u8 *data, int byte_oriented)
880 u32 buf[SF_PAGE_SIZE / 4];
881 unsigned int i, c, left, val, offset = addr & 0xff;
/* The whole write must land within one 256-byte flash page. */
883 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* WREN (write enable) then PROG_PAGE with the byte-swapped address. */
886 val = swab32(addr) | SF_PROG_PAGE;
888 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
889 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the data 1-4 bytes at a time, big-endian packed into @val. */
892 for (left = n; left; left -= c) {
894 for (val = 0, i = 0; i < c; ++i)
895 val = (val << 8) + *data++;
900 ret = sf1_write(adapter, c, c != left, 1, val);
/* Wait for the page program cycle to finish. */
904 ret = flash_wait_op(adapter, 8, 1);
908 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
910 /* Read the page to verify the write succeeded */
911 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* Compare what we just wrote (data was advanced by n above). */
916 if (memcmp(data - n, (u8 *)buf + offset, n)) {
917 CH_ERR(adapter, "failed to correctly write the flash page "
924 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
929 * t4_get_fw_version - read the firmware version
930 * @adapter: the adapter
931 * @vers: where to place the version
933 * Reads the FW version from flash.
935 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* The version lives in the fw_hdr at the start of the FW flash region. */
937 return t4_read_flash(adapter,
938 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
943 * t4_get_tp_version - read the TP microcode version
944 * @adapter: the adapter
945 * @vers: where to place the version
947 * Reads the TP microcode version from flash.
949 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* Read one word from the TP-microcode field of the flash fw_hdr. */
951 return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
957 * t4_check_fw_version - check if the FW is compatible with this driver
958 * @adapter: the adapter
960 * Checks if an adapter's FW is compatible with the driver. Returns 0
961 * if there's exact match, a negative error if the version could not be
962 * read or there's a major version mismatch, and a positive value if the
963 * expected major version is found but there's a minor version mismatch.
965 int t4_check_fw_version(struct adapter *adapter)
967 int ret, major, minor, micro;
968 int exp_major, exp_minor, exp_micro;
/* Cache the FW and TP versions in the adapter params. */
970 ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
972 ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
/* Decompose the on-flash FW version into its components. */
976 major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
977 minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
978 micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
/* Pick the version this driver was built against, per chip generation. */
980 switch (chip_id(adapter)) {
982 exp_major = T4FW_VERSION_MAJOR;
983 exp_minor = T4FW_VERSION_MINOR;
984 exp_micro = T4FW_VERSION_MICRO;
987 exp_major = T5FW_VERSION_MAJOR;
988 exp_minor = T5FW_VERSION_MINOR;
989 exp_micro = T5FW_VERSION_MICRO;
992 CH_ERR(adapter, "Unsupported chip type, %x\n",
997 if (major != exp_major) { /* major mismatch - fail */
998 CH_ERR(adapter, "card FW has major version %u, driver wants "
999 "%u\n", major, exp_major);
1003 if (minor == exp_minor && micro == exp_micro)
1004 return 0; /* perfect match */
1006 /* Minor/micro version mismatch. Report it but often it's OK. */
1011 * t4_flash_erase_sectors - erase a range of flash sectors
1012 * @adapter: the adapter
1013 * @start: the first sector to erase
1014 * @end: the last sector to erase
1016 * Erases the sectors in the given inclusive range.
1018 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
/* Per sector: WREN, then ERASE_SECTOR with the sector number in the
 * address field, then wait (generously) for the erase to complete. */
1022 while (start <= end) {
1023 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1024 (ret = sf1_write(adapter, 4, 0, 1,
1025 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1026 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1027 CH_ERR(adapter, "erase of flash sector %d failed, "
1028 "error %d\n", start, ret);
1033 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
1038 * t4_flash_cfg_addr - return the address of the flash configuration file
1039 * @adapter: the adapter
1041 * Return the address within the flash where the Firmware Configuration
1042 * File is stored, or an error if the device FLASH is too small to contain
1043 * a Firmware Configuration File.
1045 int t4_flash_cfg_addr(struct adapter *adapter)
1048 * If the device FLASH isn't large enough to hold a Firmware
1049 * Configuration File, return an error.
1051 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
1054 return FLASH_CFG_START;
1058 * t4_load_cfg - download config file
1059 * @adap: the adapter
1060 * @cfg_data: the cfg text file to write
1061 * @size: text file size
1063 * Write the supplied config text file to the card's serial flash.
1065 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1067 int ret, i, n, cfg_addr;
1069 unsigned int flash_cfg_start_sec;
1070 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/* Locate the config-file region in flash (fails on small devices). */
1072 cfg_addr = t4_flash_cfg_addr(adap);
1077 flash_cfg_start_sec = addr / SF_SEC_SIZE;
1079 if (size > FLASH_CFG_MAX_SIZE) {
1080 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
1081 FLASH_CFG_MAX_SIZE);
/* Erase the full config region before writing (or when just clearing). */
1085 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
1087 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
1088 flash_cfg_start_sec + i - 1);
1090 * If size == 0 then we're simply erasing the FLASH sectors associated
1091 * with the on-adapter Firmware Configuration File.
1093 if (ret || size == 0)
1096 /* this will write to the flash up to SF_PAGE_SIZE at a time */
1097 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
1098 if ( (size - i) < SF_PAGE_SIZE)
1102 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
1106 addr += SF_PAGE_SIZE;
1107 cfg_data += SF_PAGE_SIZE;
/* Common error path: report whether this was a clear or a download. */
1112 CH_ERR(adap, "config file %s failed %d\n",
1113 (size == 0 ? "clear" : "download"), ret);
1119 * t4_load_fw - download firmware
1120 * @adap: the adapter
1121 * @fw_data: the firmware image to write
1124 * Write the supplied firmware image to the card's serial flash.
1126 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1131 u8 first_page[SF_PAGE_SIZE];
1132 const u32 *p = (const u32 *)fw_data;
1133 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1134 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1135 unsigned int fw_start_sec;
1136 unsigned int fw_start;
1137 unsigned int fw_size;
/* Bootstrap images live in their own flash region; pick accordingly. */
1139 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
1140 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
1141 fw_start = FLASH_FWBOOTSTRAP_START;
1142 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
1144 fw_start_sec = FLASH_FW_START_SEC;
1145 fw_start = FLASH_FW_START;
1146 fw_size = FLASH_FW_MAX_SIZE;
/* Sanity checks: non-empty, 512-aligned, matches header, fits region. */
1149 CH_ERR(adap, "FW image has no data\n");
1153 CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1156 if (ntohs(hdr->len512) * 512 != size) {
1157 CH_ERR(adap, "FW image size differs from size in FW header\n");
1160 if (size > fw_size) {
1161 CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
/* The image must be built for this chip generation. */
1164 if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
1165 (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
1167 "FW image (%d) is not suitable for this adapter (%d)\n",
1168 hdr->chip, chip_id(adap));
/* The 32-bit ones'-complement style checksum must come out 0xffffffff. */
1172 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1173 csum += ntohl(p[i]);
1175 if (csum != 0xffffffff) {
1176 CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
/* Erase the destination sectors before programming. */
1181 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1182 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1187 * We write the correct version at the end so the driver can see a bad
1188 * version if the FW write fails. Start by writing a copy of the
1189 * first page with a bad version.
1191 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1192 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1193 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
/* Program the remaining pages in order. */
1198 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1199 addr += SF_PAGE_SIZE;
1200 fw_data += SF_PAGE_SIZE;
1201 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* Finally patch in the real version to mark the image valid. */
1206 ret = t4_write_flash(adap,
1207 fw_start + offsetof(struct fw_hdr, fw_ver),
1208 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1211 CH_ERR(adap, "firmware download failed, error %d\n", ret);
1215 /* BIOS boot headers */
/* Standard PCI expansion ROM header (see PCI Firmware Specification). */
1216 typedef struct pci_expansion_rom_header {
1217 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1218 u8 reserved[22]; /* Reserved per processor Architecture data */
1219 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1220 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1222 /* Legacy PCI Expansion ROM Header */
/* Legacy (x86 BIOS) expansion ROM header; adds size and checksum fields. */
1223 typedef struct legacy_pci_expansion_rom_header {
1224 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1225 u8 size512; /* Current Image Size in units of 512 bytes */
1226 u8 initentry_point[4]; /* Initialization entry point */
1227 u8 cksum; /* Checksum computed on the entire Image */
1228 u8 reserved[16]; /* Reserved */
1229 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1230 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1232 /* EFI PCI Expansion ROM Header */
/* EFI expansion ROM header; identifies an embedded EFI driver image. */
1233 typedef struct efi_pci_expansion_rom_header {
1234 u8 signature[2]; /* ROM signature. The value 0xaa55 */
1235 u8 initialization_size[2]; /* Units 512. Includes this header */
1236 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1237 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
1238 u8 efi_machine_type[2]; /* Machine type from EFI image header */
1239 u8 compression_type[2]; /* Compression type. */
1241 * Compression type definition
1244 * 0x2-0xFFFF: Reserved
1246 u8 reserved[8]; /* Reserved */
1247 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
1248 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1249 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1251 /* PCI Data Structure Format */
/* PCI Data Structure ("PCIR") that follows each ROM image header. */
1252 typedef struct pcir_data_structure { /* PCI Data Structure */
1253 u8 signature[4]; /* Signature. The string "PCIR" */
1254 u8 vendor_id[2]; /* Vendor Identification */
1255 u8 device_id[2]; /* Device Identification */
1256 u8 vital_product[2]; /* Pointer to Vital Product Data */
1257 u8 length[2]; /* PCIR Data Structure Length */
1258 u8 revision; /* PCIR Data Structure Revision */
1259 u8 class_code[3]; /* Class Code */
1260 u8 image_length[2]; /* Image Length. Multiple of 512B */
1261 u8 code_revision[2]; /* Revision Level of Code/Data */
1262 u8 code_type; /* Code Type. */
1264 * PCI Expansion ROM Code Types
1265 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1266 * 0x01: Open Firmware standard for PCI. FCODE
1267 * 0x02: Hewlett-Packard PA RISC. HP reserved
1268 * 0x03: EFI Image. EFI
1269 * 0x04-0xFF: Reserved.
1271 u8 indicator; /* Indicator. Identifies the last image in the ROM */
1272 u8 reserved[2]; /* Reserved */
1273 } pcir_data_t; /* PCI__DATA_STRUCTURE */
1275 /* BOOT constants */
/* NOTE(review): enum opener elided by extraction; these are enumerators. */
1277 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1278 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
1279 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
1280 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1281 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
1282 VENDOR_ID = 0x1425, /* Vendor ID */
1283 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1287 * modify_device_id - Modifies the device ID of the Boot BIOS image
1288 * @device_id: the device ID to write.
1289 * @boot_data: the boot image to modify.
1291 * Write the supplied device ID to the boot BIOS image.
/*
 * Walk the chain of option ROM images in boot_data and patch each image's
 * PCIR device ID, re-computing the legacy image checksum where required.
 * NOTE(review): partial extraction — loop braces and some declarations
 * are elided between the numbered lines below.
 */
1293 static void modify_device_id(int device_id, u8 *boot_data)
1295 legacy_pci_exp_rom_header_t *header;
1296 pcir_data_t *pcir_header;
1300 * Loop through all chained images and change the device ID's
1303 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1304 pcir_header = (pcir_data_t *) &boot_data[cur_header +
1305 le16_to_cpu(*(u16*)header->pcir_offset)];
1308 * Only modify the Device ID if code type is Legacy or HP.
1309 * 0x00: Okay to modify
1310 * 0x01: FCODE. Do not modify
1311 * 0x03: Okay to modify
1312 * 0x04-0xFF: Do not modify
1314 if (pcir_header->code_type == 0x00) {
1319 * Modify Device ID to match current adapter
1321 *(u16*) pcir_header->device_id = device_id;
1324 * Set checksum temporarily to 0.
1325 * We will recalculate it later.
1327 header->cksum = 0x0;
1330 * Calculate and update checksum
1332 for (i = 0; i < (header->size512 * 512); i++)
1333 csum += (u8)boot_data[cur_header + i];
1336 * Invert summed value to create the checksum
1337 * Writing new checksum value directly to the boot data
1339 boot_data[cur_header + 7] = -csum;
1341 } else if (pcir_header->code_type == 0x03) {
1344 * Modify Device ID to match current adapter
1346 *(u16*) pcir_header->device_id = device_id;
1352 * Check indicator element to identify if this is the last
1355 if (pcir_header->indicator & 0x80)
1359 * Move header pointer up to the next image in the ROM.
1361 cur_header += header->size512 * 512;
1366 * t4_load_boot - download boot flash
1367 * @adapter: the adapter
1368 * @boot_data: the boot image to write
1369 * @boot_addr: offset in flash to write boot_data
1372 * Write the supplied boot image to the card's serial flash.
1373 * The boot image has the following sections: a 28-byte header and the
/*
 * NOTE(review): partial extraction — error-return statements and closing
 * braces are elided between the numbered lines below.
 */
1376 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1377 unsigned int boot_addr, unsigned int size)
1379 pci_exp_rom_header_t *header;
1381 pcir_data_t *pcir_header;
/* boot_addr is in KB units; convert to a byte offset in flash. */
1385 unsigned int boot_sector = boot_addr * 1024;
1386 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1389 * Make sure the boot image does not encroach on the firmware region
1391 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1392 CH_ERR(adap, "boot image encroaching on firmware region\n");
1397 * Number of sectors spanned
1399 i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1401 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1402 (boot_sector >> 16) + i - 1);
1405 * If size == 0 then we're simply erasing the FLASH sectors associated
1406 * with the on-adapter option ROM file
1408 if (ret || (size == 0))
1411 /* Get boot header */
1412 header = (pci_exp_rom_header_t *)boot_data;
1413 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1414 /* PCIR Data Structure */
1415 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1418 * Perform some primitive sanity testing to avoid accidentally
1419 * writing garbage over the boot sectors. We ought to check for
1420 * more but it's not worth it for now ...
1422 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1423 CH_ERR(adap, "boot image too small/large\n");
1428 * Check BOOT ROM header signature
1430 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1431 CH_ERR(adap, "Boot image missing signature\n");
1436 * Check PCI header signature
1438 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1439 CH_ERR(adap, "PCI header missing signature\n");
1444 * Check Vendor ID matches Chelsio ID
1446 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1447 CH_ERR(adap, "Vendor ID missing signature\n");
1452 * Retrieve adapter's device ID
1454 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1455 /* Want to deal with PF 0 so I strip off PF 4 indicator */
1456 device_id = (device_id & 0xff) | 0x4000;
1459 * Check PCIE Device ID
1461 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1463 * Change the device ID in the Boot BIOS image to match
1464 * the Device ID of the current adapter.
1466 modify_device_id(device_id, boot_data);
1470 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1471 * we finish copying the rest of the boot image. This will ensure
1472 * that the BIOS boot header will only be written if the boot image
1473 * was written in full.
1476 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1477 addr += SF_PAGE_SIZE;
1478 boot_data += SF_PAGE_SIZE;
1479 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* NOTE(review): boot_data points at the first page again here only if an
 * elided line rewinds it — confirm against the full source. */
1484 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1488 CH_ERR(adap, "boot image download failed, error %d\n", ret);
1493 * t4_read_cimq_cfg - read CIM queue configuration
1494 * @adap: the adapter
1495 * @base: holds the queue base addresses in bytes
1496 * @size: holds the queue sizes in bytes
1497 * @thres: holds the queue full thresholds in bytes
1499 * Returns the current configuration of the CIM queues, starting with
1500 * the IBQs, then the OBQs.
1502 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
/* T5 has more OBQs than T4. */
1505 int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
/* First the inbound queues: select each IBQ, then read its config. */
1507 for (i = 0; i < CIM_NUM_IBQ; i++) {
1508 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1510 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1511 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1512 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1513 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
/* Then the outbound queues (no full-threshold field for OBQs). */
1515 for (i = 0; i < cim_num_obq; i++) {
1516 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1518 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1519 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1520 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1525 * t4_read_cim_ibq - read the contents of a CIM inbound queue
1526 * @adap: the adapter
1527 * @qid: the queue index
1528 * @data: where to store the queue contents
1529 * @n: capacity of @data in 32-bit words
1531 * Reads the contents of the selected CIM queue starting at address 0 up
1532 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1533 * error and the number of 32-bit words actually read on success.
1535 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1539 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only IBQs 0..5 exist; n must be a multiple of 4 words. */
1541 if (qid > 5 || (n & 3))
1544 addr = qid * nwords;
1548 for (i = 0; i < n; i++, addr++) {
1549 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1552 * It might take 3-10ms before the IBQ debug read access is
1553 * allowed. Wait for 1 Sec with a delay of 1 usec.
1555 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1559 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable debug access before returning. */
1561 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1566 * t4_read_cim_obq - read the contents of a CIM outbound queue
1567 * @adap: the adapter
1568 * @qid: the queue index
1569 * @data: where to store the queue contents
1570 * @n: capacity of @data in 32-bit words
1572 * Reads the contents of the selected CIM queue starting at address 0 up
1573 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1574 * error and the number of 32-bit words actually read on success.
1576 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1579 unsigned int addr, v, nwords;
1580 int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1582 if (qid >= cim_num_obq || (n & 3))
/* Select the OBQ and read back its base/size configuration. */
1585 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1586 V_QUENUMSELECT(qid));
1587 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1589 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
1590 nwords = G_CIMQSIZE(v) * 64; /* same */
1594 for (i = 0; i < n; i++, addr++) {
1595 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1597 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1601 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
/* Disable debug access before returning. */
1603 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Base offsets of sub-regions within the CIM internal address space. */
1609 CIM_CTL_BASE = 0x2000,
1610 CIM_PBT_ADDR_BASE = 0x2800,
1611 CIM_PBT_LRF_BASE = 0x3000,
1612 CIM_PBT_DATA_BASE = 0x3800
1616 * t4_cim_read - read a block from CIM internal address space
1617 * @adap: the adapter
1618 * @addr: the start address within the CIM address space
1619 * @n: number of words to read
1620 * @valp: where to store the result
1622 * Reads a block of 4-byte words from the CIM internal address space.
1624 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if a previous host access is still in flight. */
1629 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* One word per iteration: post the address, wait for !HOSTBUSY, read data. */
1632 for ( ; !ret && n--; addr += 4) {
1633 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1634 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1637 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1643 * t4_cim_write - write a block into CIM internal address space
1644 * @adap: the adapter
1645 * @addr: the start address within the CIM address space
1646 * @n: number of words to write
1647 * @valp: set of values to write
1649 * Writes a block of 4-byte words into the CIM internal address space.
1651 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1652 const unsigned int *valp)
/* Bail out if a previous host access is still in flight. */
1656 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* Data must be written before the address/command word triggers the write. */
1659 for ( ; !ret && n--; addr += 4) {
1660 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1661 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1662 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single word to the CIM address space. */
1668 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1670 return t4_cim_write(adap, addr, 1, &val);
1674 * t4_cim_ctl_read - read a block from CIM control region
1675 * @adap: the adapter
1676 * @addr: the start address within the CIM control region
1677 * @n: number of words to read
1678 * @valp: where to store the result
1680 * Reads a block of 4-byte words from the CIM control region.
1682 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* The control region is just the CIM space offset by CIM_CTL_BASE. */
1685 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1689 * t4_cim_read_la - read CIM LA capture buffer
1690 * @adap: the adapter
1691 * @la_buf: where to store the LA data
1692 * @wrptr: the HW write pointer within the capture buffer
1694 * Reads the contents of the CIM LA buffer with the most recent entry at
1695 * the end of the returned data and with the entry at @wrptr first.
1696 * We try to leave the LA in the running state we find it in.
1698 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1701 unsigned int cfg, val, idx;
1703 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1707 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
1708 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1713 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Start reading at the HW write pointer so the oldest entry comes first. */
1717 idx = G_UPDBGLAWRPTR(val);
1721 for (i = 0; i < adap->params.cim_la_size; i++) {
1722 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1723 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1726 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Hardware clears RDEN when the read completes. */
1729 if (val & F_UPDBGLARDEN) {
1733 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1736 idx = (idx + 1) & M_UPDBGLARDPTR;
/* Restore the running state if the LA was enabled when we entered. */
1739 if (cfg & F_UPDBGLAEN) {
1740 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1741 cfg & ~F_UPDBGLARDEN);
/*
 * Read the CIM PIF request/response logic-analyzer buffers.
 * The LA is frozen for the duration of the read and restored afterwards.
 */
1748 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1749 unsigned int *pif_req_wrptr,
1750 unsigned int *pif_rsp_wrptr)
1753 u32 cfg, val, req, rsp;
1755 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
/* Freeze the LA if it is currently enabled. */
1756 if (cfg & F_LADBGEN)
1757 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1759 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1760 req = G_POLADBGWRPTR(val);
1761 rsp = G_PILADBGWRPTR(val);
1763 *pif_req_wrptr = req;
1765 *pif_rsp_wrptr = rsp;
/* Each LA entry spans 6 words; read both buffers in lock step. */
1767 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1768 for (j = 0; j < 6; j++) {
1769 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1770 V_PILADBGRDPTR(rsp));
1771 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1772 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1776 req = (req + 2) & M_POLADBGRDPTR;
1777 rsp = (rsp + 2) & M_PILADBGRDPTR;
/* Restore the original debug configuration (re-enables the LA if it ran). */
1779 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Read the CIM MA logic-analyzer request/response buffers, freezing the
 * LA during the read and restoring its prior state afterwards.
 */
1782 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1787 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1788 if (cfg & F_LADBGEN)
1789 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
/* Each MA LA entry spans 5 words. */
1791 for (i = 0; i < CIM_MALA_SIZE; i++) {
1792 for (j = 0; j < 5; j++) {
1794 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1795 V_PILADBGRDPTR(idx));
1796 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1797 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1800 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1804 * t4_tp_read_la - read TP LA capture buffer
1805 * @adap: the adapter
1806 * @la_buf: where to store the LA data
1807 * @wrptr: the HW write pointer within the capture buffer
1809 * Reads the contents of the TP LA buffer with the most recent entry at
1810 * the end of the returned data and with the entry at @wrptr first.
1811 * We leave the LA in the running state we find it in.
1813 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1815 bool last_incomplete;
1816 unsigned int i, cfg, val, idx;
1818 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1819 if (cfg & F_DBGLAENABLE) /* freeze LA */
1820 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1821 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE))
1823 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1824 idx = G_DBGLAWPTR(val);
/* In modes >= 2 the entry at the write pointer may only be half-written. */
1825 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1826 if (last_incomplete)
1827 idx = (idx + 1) & M_DBGLARPTR;
/* Preserve all config bits except the read pointer, which we drive below. */
1832 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1833 val |= adap->params.tp.la_mask;
1835 for (i = 0; i < TPLA_SIZE; i++) {
1836 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1837 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1838 idx = (idx + 1) & M_DBGLARPTR;
1841 /* Wipe out last entry if it isn't valid */
1842 if (last_incomplete)
1843 la_buf[TPLA_SIZE - 1] = ~0ULL;
1845 if (cfg & F_DBGLAENABLE) /* restore running state */
1846 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1847 cfg | adap->params.tp.la_mask);
/*
 * Dump the 8 ULP-RX LA data columns into la_buf, interleaved so that
 * entry j's columns are at la_buf[j*8 .. j*8+7].
 */
1850 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1854 for (i = 0; i < 8; i++) {
1855 u32 *p = la_buf + i;
/* Select column i, then align the read pointer with the write pointer. */
1857 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1858 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1859 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1860 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1861 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
/* Link capabilities we are willing to advertise: all speeds plus autoneg. */
1865 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1866 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1867 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1870 * t4_link_start - apply link configuration to MAC/PHY
1871 * @phy: the PHY to setup
1872 * @mac: the MAC to setup
1873 * @lc: the requested link configuration
1875 * Set up a port's MAC and PHY according to a desired link configuration.
1876 * - If the PHY can auto-negotiate first decide what to advertise, then
1877 * enable/disable auto-negotiation as desired, and reset.
1878 * - If the PHY does not auto-negotiate just reset it.
1879 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1880 * otherwise do it later based on the outcome of auto-negotiation.
1882 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1883 struct link_config *lc)
1885 struct fw_port_cmd c;
1886 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
/* Translate requested pause settings into FW capability bits. */
1889 if (lc->requested_fc & PAUSE_RX)
1890 fc |= FW_PORT_CAP_FC_RX;
1891 if (lc->requested_fc & PAUSE_TX)
1892 fc |= FW_PORT_CAP_FC_TX;
1894 memset(&c, 0, sizeof(c));
1895 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1896 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1897 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* No autoneg support: advertise fixed capabilities.
 * Autoneg disabled: force the requested speed.
 * Otherwise: advertise and let autoneg settle fc/speed later. */
1900 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1901 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1902 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1903 } else if (lc->autoneg == AUTONEG_DISABLE) {
1904 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1905 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1907 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1909 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1913 * t4_restart_aneg - restart autonegotiation
1914 * @adap: the adapter
1915 * @mbox: mbox to use for the FW command
1916 * @port: the port id
1918 * Restarts autonegotiation for the selected port.
1920 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1922 struct fw_port_cmd c;
1924 memset(&c, 0, sizeof(c));
1925 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1926 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1927 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* Advertising only ANEG kicks off a fresh autonegotiation cycle. */
1929 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1930 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* NOTE(review): struct opener elided by extraction; these are the fields of
 * the interrupt-description table entry used by t4_handle_intr_status(). */
1934 unsigned int mask; /* bits to check in interrupt status */
1935 const char *msg; /* message to print or NULL */
1936 short stat_idx; /* stat counter to increment or -1 */
1937 unsigned short fatal; /* whether the condition reported is fatal */
1941 * t4_handle_intr_status - table driven interrupt handler
1942 * @adapter: the adapter that generated the interrupt
1943 * @reg: the interrupt status register to process
1944 * @acts: table of interrupt actions
1946 * A table driven interrupt handler that applies a set of masks to an
1947 * interrupt status word and performs the corresponding actions if the
1948 * interrupts described by the mask have occurred. The actions include
1949 * optionally emitting a warning or alert message. The table is terminated
1950 * by an entry specifying mask 0. Returns the number of fatal interrupt
1953 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1954 const struct intr_info *acts)
1957 unsigned int mask = 0;
1958 unsigned int status = t4_read_reg(adapter, reg);
/* Table is terminated by an entry with mask == 0. */
1960 for ( ; acts->mask; ++acts) {
1961 if (!(status & acts->mask))
/* Fatal conditions alert unconditionally; non-fatal ones are rate-limited. */
1965 CH_ALERT(adapter, "%s (0x%x)\n",
1966 acts->msg, status & acts->mask);
1967 } else if (acts->msg)
1968 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1969 acts->msg, status & acts->mask);
1973 if (status) /* clear processed interrupts */
1974 t4_write_reg(adapter, reg, status);
1979 * Interrupt handler for the PCIE module.
/*
 * Decode and clear PCIE interrupt causes. T4 additionally reports the
 * sysbus-agent and PCIe-port status registers; T5 folds everything into
 * a single (larger) cause register.
 */
1981 static void pcie_intr_handler(struct adapter *adapter)
1983 static struct intr_info sysbus_intr_info[] = {
1984 { F_RNPP, "RXNP array parity error", -1, 1 },
1985 { F_RPCP, "RXPC array parity error", -1, 1 },
1986 { F_RCIP, "RXCIF array parity error", -1, 1 },
1987 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1988 { F_RFTP, "RXFT array parity error", -1, 1 },
1991 static struct intr_info pcie_port_intr_info[] = {
1992 { F_TPCP, "TXPC array parity error", -1, 1 },
1993 { F_TNPP, "TXNP array parity error", -1, 1 },
1994 { F_TFTP, "TXFT array parity error", -1, 1 },
1995 { F_TCAP, "TXCA array parity error", -1, 1 },
1996 { F_TCIP, "TXCIF array parity error", -1, 1 },
1997 { F_RCAP, "RXCA array parity error", -1, 1 },
1998 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1999 { F_RDPE, "Rx data parity error", -1, 1 },
2000 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
2003 static struct intr_info pcie_intr_info[] = {
2004 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2005 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2006 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2007 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2008 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2009 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2010 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2011 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2012 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2013 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2014 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2015 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2016 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2017 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2018 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2019 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2020 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2021 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2022 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2023 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2024 { F_FIDPERR, "PCI FID parity error", -1, 1 },
2025 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2026 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2027 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2028 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2029 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2030 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2031 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
2032 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
2033 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2038 static struct intr_info t5_pcie_intr_info[] = {
2039 { F_MSTGRPPERR, "Master Response Read Queue parity error",
2041 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2042 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2043 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2044 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2045 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2046 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2047 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2049 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2051 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2052 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2053 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2054 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2055 { F_DREQWRPERR, "PCI DMA channel write request parity error",
2057 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2058 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2059 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
2060 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2061 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2062 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2063 { F_FIDPERR, "PCI FID parity error", -1, 1 },
2064 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
2065 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2066 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2067 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2069 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2071 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2072 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2073 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2074 { F_READRSPERR, "Outbound read error", -1,
/* T4 path: accumulate fatal counts from all three cause registers. */
2082 fat = t4_handle_intr_status(adapter,
2083 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2085 t4_handle_intr_status(adapter,
2086 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2087 pcie_port_intr_info) +
2088 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
/* T5 path: single cause register with the T5-specific table. */
2091 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2094 t4_fatal_err(adapter);
2098 * TP interrupt handler.
/* Decode TP interrupt causes; any reported condition is fatal. */
2100 static void tp_intr_handler(struct adapter *adapter)
2102 static struct intr_info tp_intr_info[] = {
2103 { 0x3fffffff, "TP parity error", -1, 1 },
2104 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2108 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2109 t4_fatal_err(adapter);
2113 * SGE interrupt handler.
/*
 * Decode SGE interrupt causes: parity errors from CAUSE1/CAUSE2,
 * table-driven conditions from CAUSE3, plus the per-queue error register.
 */
2115 static void sge_intr_handler(struct adapter *adapter)
2120 static struct intr_info sge_intr_info[] = {
2121 { F_ERR_CPL_EXCEED_IQE_SIZE,
2122 "SGE received CPL exceeding IQE size", -1, 1 },
2123 { F_ERR_INVALID_CIDX_INC,
2124 "SGE GTS CIDX increment too large", -1, 0 },
2125 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2126 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2127 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2128 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2129 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2131 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2133 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2135 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2137 { F_ERR_ING_CTXT_PRIO,
2138 "SGE too many priority ingress contexts", -1, 0 },
2139 { F_ERR_EGR_CTXT_PRIO,
2140 "SGE too many priority egress contexts", -1, 0 },
2141 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2142 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* CAUSE1/CAUSE2 together form a 64-bit parity-error status. */
2146 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2147 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2149 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2150 (unsigned long long)v);
2151 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2152 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2155 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2157 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2158 if (err & F_ERROR_QID_VALID) {
2159 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2160 if (err & F_UNCAPTURED_ERROR)
2161 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
/* Write-1-to-clear both the captured QID and the uncaptured flag. */
2162 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2163 F_UNCAPTURED_ERROR);
2167 t4_fatal_err(adapter);
/* Aggregate masks for all CIM OBQ / IBQ parity-error cause bits. */
2170 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2171 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2172 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2173 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2176 * CIM interrupt handler.
/*
 * Decode CIM host and uP-access interrupt causes. Also reports a firmware
 * error if the FW has posted one in the PCIE_FW register.
 */
2178 static void cim_intr_handler(struct adapter *adapter)
2180 static struct intr_info cim_intr_info[] = {
2181 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2182 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2183 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2184 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2185 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2186 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2187 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2190 static struct intr_info cim_upintr_info[] = {
2191 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2192 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2193 { F_ILLWRINT, "CIM illegal write", -1, 1 },
2194 { F_ILLRDINT, "CIM illegal read", -1, 1 },
2195 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2196 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2197 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2198 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2199 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2200 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2201 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2202 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2203 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2204 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2205 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2206 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2207 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2208 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2209 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2210 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2211 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2212 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2213 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2214 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2215 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2216 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2217 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2218 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* A FW-reported error shows up via PCIE_FW; surface it first. */
2223 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2224 t4_report_fw_error(adapter)
2226 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2228 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2231 t4_fatal_err(adapter);
2235 * ULP RX interrupt handler.
/*
 * ulprx_intr_handler - decode ULP RX interrupt causes; any flagged cause
 * in A_ULP_RX_INT_CAUSE is treated as fatal.
 */
2237 static void ulprx_intr_handler(struct adapter *adapter)
2239 static struct intr_info ulprx_intr_info[] = {
2240 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2241 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
/* catch-all mask for the remaining parity-error bits of the register */
2242 { 0x7fffff, "ULPRX parity error", -1, 1 },
2246 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2247 t4_fatal_err(adapter);
2251 * ULP TX interrupt handler.
/*
 * ulptx_intr_handler - decode ULP TX interrupt causes (per-channel PBL
 * out-of-bounds plus a catch-all parity mask); any cause is fatal.
 */
2253 static void ulptx_intr_handler(struct adapter *adapter)
2255 static struct intr_info ulptx_intr_info[] = {
2256 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2258 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2260 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2262 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2264 { 0xfffffff, "ULPTX parity error", -1, 1 },
2268 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2269 t4_fatal_err(adapter);
2273 * PM TX interrupt handler.
/*
 * pmtx_intr_handler - decode PM TX interrupt causes (oversized/zero-length
 * pcmds, framing, and parity errors); any cause is fatal.
 */
2275 static void pmtx_intr_handler(struct adapter *adapter)
2277 static struct intr_info pmtx_intr_info[] = {
2278 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2279 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2280 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2281 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
/* framing-error bits covered as one mask */
2282 { 0xffffff0, "PMTX framing error", -1, 1 },
2283 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2284 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2286 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2287 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2291 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2292 t4_fatal_err(adapter);
2296 * PM RX interrupt handler.
/*
 * pmrx_intr_handler - decode PM RX interrupt causes (zero-length pcmd,
 * framing, and parity errors); any cause is fatal.
 */
2298 static void pmrx_intr_handler(struct adapter *adapter)
2300 static struct intr_info pmrx_intr_info[] = {
2301 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2302 { 0x3ffff0, "PMRX framing error", -1, 1 },
2303 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2304 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2306 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2307 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2311 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2312 t4_fatal_err(adapter);
2316 * CPL switch interrupt handler.
/*
 * cplsw_intr_handler - decode CPL switch interrupt causes; any cause in
 * A_CPL_INTR_CAUSE is fatal.
 */
2318 static void cplsw_intr_handler(struct adapter *adapter)
2320 static struct intr_info cplsw_intr_info[] = {
2321 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2322 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2323 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2324 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2325 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2326 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2330 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2331 t4_fatal_err(adapter);
2335 * LE interrupt handler.
/*
 * le_intr_handler - decode Lookup Engine (LE) interrupt causes.
 * LIP-related causes are non-fatal (flag 0); parity/unknown-command causes
 * are marked fatal. The action taken on a non-zero return is not visible
 * here -- presumably t4_fatal_err(); confirm in the full source.
 */
2337 static void le_intr_handler(struct adapter *adap)
2339 static struct intr_info le_intr_info[] = {
2340 { F_LIPMISS, "LE LIP miss", -1, 0 },
2341 { F_LIP0, "LE 0 LIP error", -1, 0 },
2342 { F_PARITYERR, "LE parity error", -1, 1 },
2343 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2344 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
2348 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2353 * MPS interrupt handler.
/*
 * mps_intr_handler - MPS interrupt handler.
 * Decodes seven MPS sub-block cause registers (Rx/Tx, TRC, statistics
 * SRAM/Tx/Rx FIFOs, classifier), accumulating the fatal count; then clears
 * A_MPS_INT_CAUSE and flushes the write with a read-back before escalating.
 */
2355 static void mps_intr_handler(struct adapter *adapter)
2357 static struct intr_info mps_rx_intr_info[] = {
2358 { 0xffffff, "MPS Rx parity error", -1, 1 },
2361 static struct intr_info mps_tx_intr_info[] = {
2362 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2363 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2364 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2366 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2368 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
2369 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2370 { F_FRMERR, "MPS Tx framing error", -1, 1 },
2373 static struct intr_info mps_trc_intr_info[] = {
2374 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2375 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2377 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2380 static struct intr_info mps_stat_sram_intr_info[] = {
2381 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2384 static struct intr_info mps_stat_tx_intr_info[] = {
2385 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2388 static struct intr_info mps_stat_rx_intr_info[] = {
2389 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2392 static struct intr_info mps_cls_intr_info[] = {
2393 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2394 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2395 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum of all sub-handlers' fatal counts decides whether to escalate. */
2401 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2403 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2405 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2406 mps_trc_intr_info) +
2407 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2408 mps_stat_sram_intr_info) +
2409 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2410 mps_stat_tx_intr_info) +
2411 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2412 mps_stat_rx_intr_info) +
2413 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2416 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2417 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2419 t4_fatal_err(adapter);
2422 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2425 * EDC/MC interrupt handler.
/*
 * mem_intr_handler - EDC/MC memory controller interrupt handler.
 * @idx: which memory: MEM_EDC0/MEM_EDC1 use per-EDC registers; otherwise
 * the MC registers (T4 and T5 use different MC register addresses).
 *
 * Reports FIFO parity errors and correctable/uncorrectable ECC errors,
 * clears the handled cause bits, and escalates parity/uncorrectable-ECC
 * as fatal.
 */
2427 static void mem_intr_handler(struct adapter *adapter, int idx)
2429 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2431 unsigned int addr, cnt_addr, v;
2433 if (idx <= MEM_EDC1) {
2434 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2435 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2437 if (is_t4(adapter)) {
2438 addr = A_MC_INT_CAUSE;
2439 cnt_addr = A_MC_ECC_STATUS;
2441 addr = A_MC_P_INT_CAUSE;
2442 cnt_addr = A_MC_P_ECC_STATUS;
2446 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2447 if (v & F_PERR_INT_CAUSE)
2448 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2449 if (v & F_ECC_CE_INT_CAUSE) {
2450 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
/* Writing the max count value presumably clears the CE counter -- confirm. */
2452 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2453 CH_WARN_RATELIMIT(adapter,
2454 "%u %s correctable ECC data error%s\n",
2455 cnt, name[idx], cnt > 1 ? "s" : "");
2457 if (v & F_ECC_UE_INT_CAUSE)
2458 CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
/* Clear the cause bits we just handled. */
2461 t4_write_reg(adapter, addr, v);
2462 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2463 t4_fatal_err(adapter);
2467 * MA interrupt handler.
/*
 * ma_intr_handler - Memory Arbiter interrupt handler.
 * Reports parity errors (status registers 1 and 2) and address wrap-around
 * errors, then clears the cause register. Every MA interrupt is fatal.
 */
2469 static void ma_intr_handler(struct adapter *adapter)
2471 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2473 if (status & F_MEM_PERR_INT_CAUSE) {
2474 CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2475 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
2478 "MA parity error, parity status %#x\n",
2479 t4_read_reg(adapter,
2480 A_MA_PARITY_ERROR_STATUS2));
2482 if (status & F_MEM_WRAP_INT_CAUSE) {
2483 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
/* wrap status encodes the offending client and the (>>4 scaled) address */
2484 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2485 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2486 G_MEM_WRAP_ADDRESS(v) << 4);
2488 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2489 t4_fatal_err(adapter);
2493 * SMB interrupt handler.
/*
 * smb_intr_handler - decode SMBus interrupt causes (master/slave FIFO
 * parity errors). The action on a non-zero return is outside this view --
 * presumably t4_fatal_err(); confirm in the full source.
 */
2495 static void smb_intr_handler(struct adapter *adap)
2497 static struct intr_info smb_intr_info[] = {
2498 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2499 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2500 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2504 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2509 * NC-SI interrupt handler.
/*
 * ncsi_intr_handler - decode NC-SI interrupt causes (CIM/MPS/FIFO parity
 * errors). The action on a non-zero return is outside this view --
 * presumably t4_fatal_err(); confirm in the full source.
 */
2511 static void ncsi_intr_handler(struct adapter *adap)
2513 static struct intr_info ncsi_intr_info[] = {
2514 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2515 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2516 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2517 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2521 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2526 * XGMAC interrupt handler.
/*
 * xgmac_intr_handler - per-port XGMAC interrupt handler.
 * @port: the port index; selects the T4 (PORT_REG) or T5 (T5_PORT_REG)
 * per-port cause register.
 *
 * Only Tx/Rx FIFO parity errors are handled here; the handled bits are
 * written back to clear them.
 */
2528 static void xgmac_intr_handler(struct adapter *adap, int port)
2530 u32 v, int_cause_reg;
2533 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2535 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2537 v = t4_read_reg(adap, int_cause_reg);
/* mask down to the two causes this handler cares about */
2538 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2542 if (v & F_TXFIFO_PRTY_ERR)
2543 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2544 if (v & F_RXFIFO_PRTY_ERR)
2545 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2546 t4_write_reg(adap, int_cause_reg, v);
2551 * PL interrupt handler.
/*
 * pl_intr_handler - decode PL (top-level) interrupt causes, using the
 * T4 or T5 table as appropriate. Action on a non-zero return is outside
 * this view -- presumably t4_fatal_err(); confirm in the full source.
 */
2553 static void pl_intr_handler(struct adapter *adap)
2555 static struct intr_info pl_intr_info[] = {
2556 { F_FATALPERR, "Fatal parity error", -1, 1 },
2557 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2561 static struct intr_info t5_pl_intr_info[] = {
2562 { F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2563 { F_FATALPERR, "Fatal parity error", -1, 1 },
2567 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2568 is_t4(adap) ? pl_intr_info : t5_pl_intr_info))
2572 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2573 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2574 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2575 F_CPL_SWITCH | F_SGE | F_ULP_TX)
2578 * t4_slow_intr_handler - control path interrupt handler
2579 * @adapter: the adapter
2581 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2582 * The designation 'slow' is because it involves register reads, while
2583 * data interrupts typically don't involve any MMIOs.
/*
 * t4_slow_intr_handler - control path ("slow") interrupt handler.
 * Reads A_PL_INT_CAUSE once, dispatches each set cause bit to the matching
 * per-module handler, then clears only the causes covered by
 * GLBL_INTR_MASK and flushes the write with a read-back.
 * Return value semantics are not fully visible in this chunk.
 */
2585 int t4_slow_intr_handler(struct adapter *adapter)
2587 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
/* Bail out early when none of the causes we own are set. */
2589 if (!(cause & GLBL_INTR_MASK))
2592 cim_intr_handler(adapter);
2594 mps_intr_handler(adapter);
2596 ncsi_intr_handler(adapter);
2598 pl_intr_handler(adapter);
2600 smb_intr_handler(adapter);
2601 if (cause & F_XGMAC0)
2602 xgmac_intr_handler(adapter, 0);
2603 if (cause & F_XGMAC1)
2604 xgmac_intr_handler(adapter, 1);
2605 if (cause & F_XGMAC_KR0)
2606 xgmac_intr_handler(adapter, 2);
2607 if (cause & F_XGMAC_KR1)
2608 xgmac_intr_handler(adapter, 3);
2610 pcie_intr_handler(adapter);
2612 mem_intr_handler(adapter, MEM_MC);
2614 mem_intr_handler(adapter, MEM_EDC0);
2616 mem_intr_handler(adapter, MEM_EDC1);
2618 le_intr_handler(adapter);
2620 tp_intr_handler(adapter);
2622 ma_intr_handler(adapter);
2623 if (cause & F_PM_TX)
2624 pmtx_intr_handler(adapter);
2625 if (cause & F_PM_RX)
2626 pmrx_intr_handler(adapter);
2627 if (cause & F_ULP_RX)
2628 ulprx_intr_handler(adapter);
2629 if (cause & F_CPL_SWITCH)
2630 cplsw_intr_handler(adapter);
2632 sge_intr_handler(adapter);
2633 if (cause & F_ULP_TX)
2634 ulptx_intr_handler(adapter);
2636 /* Clear the interrupts just processed for which we are the master. */
2637 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2638 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2643 * t4_intr_enable - enable interrupts
2644 * @adapter: the adapter whose interrupts should be enabled
2646 * Enable PF-specific interrupts for the calling function and the top-level
2647 * interrupt concentrator for global interrupts. Interrupts are already
2648 * enabled at each module, here we just enable the roots of the interrupt
2651 * Note: this function should be called only when the driver manages
2652 * non PF-specific interrupts from the various HW modules. Only one PCI
2653 * function at a time should be doing this.
/*
 * t4_intr_enable - enable PF-specific and global interrupt roots.
 * Enables the SGE error interrupt set, the PF interrupt mask, and maps
 * global interrupts to the calling PF (bit derived from A_PL_WHOAMI).
 * Per-module interrupts are assumed already enabled; only one PCI
 * function at a time should do this.
 */
2655 void t4_intr_enable(struct adapter *adapter)
2657 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2659 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2660 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2661 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2662 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2663 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2664 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2665 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2667 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
/* route global interrupts to this PF */
2668 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2672 * t4_intr_disable - disable interrupts
2673 * @adapter: the adapter whose interrupts should be disabled
2675 * Disable interrupts. We only disable the top-level interrupt
2676 * concentrators. The caller must be a PCI function managing global
/*
 * t4_intr_disable - disable the top-level interrupt concentrators for the
 * calling PF: clear the PF interrupt enable and unmap this PF from the
 * global interrupt map.
 */
2679 void t4_intr_disable(struct adapter *adapter)
2681 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2683 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2684 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2688 * t4_intr_clear - clear all interrupts
2689 * @adapter: the adapter whose interrupts should be cleared
2691 * Clears all interrupts. The caller must be a PCI function managing
2692 * global interrupts.
/*
 * t4_intr_clear - clear all interrupt cause registers.
 * Writes all-ones to every module cause register in the table, handles
 * the T4/T5 MC register difference and T4-only PCIe UTL status registers,
 * then clears the top-level PL cause and flushes with a read-back.
 * Caller must be the PCI function managing global interrupts.
 */
2694 void t4_intr_clear(struct adapter *adapter)
2696 static const unsigned int cause_reg[] = {
2697 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2698 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2699 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
2700 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2701 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2702 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2704 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2705 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2706 A_MPS_RX_PERR_INT_CAUSE,
2708 MYPF_REG(A_PL_PF_INT_CAUSE),
2715 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2716 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
/* MC cause register differs between T4 and T5 */
2718 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2719 A_MC_P_INT_CAUSE, 0xffffffff);
2721 if (is_t4(adapter)) {
2722 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2724 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
/* T5 (non-T4) path: clear the second MA parity status register */
2727 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
2729 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2730 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2734 * hash_mac_addr - return the hash value of a MAC address
2735 * @addr: the 48-bit Ethernet MAC address
2737 * Hashes a MAC address according to the hash function used by HW inexact
2738 * (hash) address matching.
/*
 * hash_mac_addr - hash a 48-bit MAC address for HW inexact (hash) matching.
 * Packs the address into two 24-bit halves; the folding of a/b into the
 * final hash value is outside this view.
 */
2740 static int hash_mac_addr(const u8 *addr)
2742 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2743 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2751 * t4_config_rss_range - configure a portion of the RSS mapping table
2752 * @adapter: the adapter
2753 * @mbox: mbox to use for the FW command
2754 * @viid: virtual interface whose RSS subtable is to be written
2755 * @start: start entry in the table to write
2756 * @n: how many table entries to write
2757 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2758 * @nrspq: number of values in @rspq
2760 * Programs the selected part of the VI's RSS mapping table with the
2761 * provided values. If @nrspq < @n the supplied values are used repeatedly
2762 * until the full table range is populated.
2764 * The caller must ensure the values in @rspq are in the range allowed for
/*
 * t4_config_rss_range - program part of a VI's RSS indirection table.
 * Builds FW_RSS_IND_TBL_CMD mailbox commands, packing up to 32 Ingress
 * Queue IDs per command, three 10-bit IDs per 32-bit word. If fewer
 * values than table entries are supplied, the values are reused cyclically
 * (rsp wraps to rspq). Returns 0 or a negative error from t4_wr_mbox().
 */
2767 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2768 int start, int n, const u16 *rspq, unsigned int nrspq)
2771 const u16 *rsp = rspq;
2772 const u16 *rsp_end = rspq + nrspq;
2773 struct fw_rss_ind_tbl_cmd cmd;
2775 memset(&cmd, 0, sizeof(cmd));
2776 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2777 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2778 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2779 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2783 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2784 * Queue Identifiers. These Ingress Queue IDs are packed three to
2785 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2789 int nq = min(n, 32);
2791 __be32 *qp = &cmd.iq0_to_iq2;
2794 * Set up the firmware RSS command header to send the next
2795 * "nq" Ingress Queue IDs to the firmware.
2797 cmd.niqid = htons(nq);
2798 cmd.startidx = htons(start);
2801 * "nq" more done for the start of the next loop.
2807 * While there are still Ingress Queue IDs to stuff into the
2808 * current firmware RSS command, retrieve them from the
2809 * Ingress Queue ID array and insert them into the command.
2813 * Grab up to the next 3 Ingress Queue IDs (wrapping
2814 * around the Ingress Queue ID array if necessary) and
2815 * insert them into the firmware RSS command at the
2816 * current 3-tuple position within the commad.
2820 int nqbuf = min(3, nq);
2823 qbuf[0] = qbuf[1] = qbuf[2] = 0;
2824 while (nqbuf && nq_packed < 32) {
2831 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2832 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2833 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2837 * Send this portion of the RRS table update to the firmware;
2838 * bail out on any errors.
2840 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2849 * t4_config_glbl_rss - configure the global RSS mode
2850 * @adapter: the adapter
2851 * @mbox: mbox to use for the FW command
2852 * @mode: global RSS mode
2853 * @flags: mode-specific flags
2855 * Sets the global RSS mode.
/*
 * t4_config_glbl_rss - set the global RSS mode via FW_RSS_GLB_CONFIG_CMD.
 * Only MANUAL and BASICVIRTUAL modes populate mode-specific fields;
 * @flags is used only in BASICVIRTUAL mode. Returns the mailbox result.
 */
2857 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2860 struct fw_rss_glb_config_cmd c;
2862 memset(&c, 0, sizeof(c));
2863 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2864 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2865 c.retval_len16 = htonl(FW_LEN16(c));
2866 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2867 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2868 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2869 c.u.basicvirtual.mode_pkd =
2870 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2871 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2874 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2878 * t4_config_vi_rss - configure per VI RSS settings
2879 * @adapter: the adapter
2880 * @mbox: mbox to use for the FW command
2883 * @defq: id of the default RSS queue for the VI.
2885 * Configures VI-specific RSS properties.
/*
 * t4_config_vi_rss - configure per-VI RSS flags and default queue via
 * FW_RSS_VI_CONFIG_CMD. Returns the mailbox result.
 */
2887 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2888 unsigned int flags, unsigned int defq)
2890 struct fw_rss_vi_config_cmd c;
2892 memset(&c, 0, sizeof(c));
2893 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2894 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2895 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2896 c.retval_len16 = htonl(FW_LEN16(c));
2897 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2898 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2899 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2902 /* Read an RSS table row */
2903 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2905 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2906 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2911 * t4_read_rss - read the contents of the RSS mapping table
2912 * @adapter: the adapter
2913 * @map: holds the contents of the RSS mapping table
2915 * Reads the contents of the RSS hash->queue mapping table.
/*
 * t4_read_rss - read the full RSS mapping table into @map.
 * Each row holds two queue entries, so RSS_NENTRIES/2 rows are read and
 * unpacked with G_LKPTBLQUEUE0/1. Error handling of rd_rss_row() failures
 * is outside this view.
 */
2917 int t4_read_rss(struct adapter *adapter, u16 *map)
2922 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2923 ret = rd_rss_row(adapter, i, &val);
2926 *map++ = G_LKPTBLQUEUE0(val);
2927 *map++ = G_LKPTBLQUEUE1(val);
2933 * t4_read_rss_key - read the global RSS key
2934 * @adap: the adapter
2935 * @key: 10-entry array holding the 320-bit RSS key
2937 * Reads the global 320-bit RSS key.
/*
 * t4_read_rss_key - read the global 320-bit RSS key (ten 32-bit words)
 * via the TP PIO indirect-register interface.
 */
2939 void t4_read_rss_key(struct adapter *adap, u32 *key)
2941 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2942 A_TP_RSS_SECRET_KEY0);
2946 * t4_write_rss_key - program one of the RSS keys
2947 * @adap: the adapter
2948 * @key: 10-entry array holding the 320-bit RSS key
2949 * @idx: which RSS key to write
2951 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2952 * 0..15 the corresponding entry in the RSS key table is written,
2953 * otherwise the global RSS key is written.
/*
 * t4_write_rss_key - write a 320-bit RSS key.
 * The key words always go to the secret-key registers; if @idx is 0..15
 * the key is additionally committed to that RSS key-table entry via
 * A_TP_RSS_CONFIG_VRT (KEYWRADDR + KEYWREN).
 */
2955 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2957 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2958 A_TP_RSS_SECRET_KEY0);
2959 if (idx >= 0 && idx < 16)
2960 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2961 V_KEYWRADDR(idx) | F_KEYWREN);
2965 * t4_read_rss_pf_config - read PF RSS Configuration Table
2966 * @adapter: the adapter
2967 * @index: the entry in the PF RSS table to read
2968 * @valp: where to store the returned value
2970 * Reads the PF RSS Configuration Table at the specified index and returns
2971 * the value found there.
/*
 * t4_read_rss_pf_config - read one PF RSS Configuration Table entry
 * (A_TP_RSS_PF0_CONFIG + @index) into *valp via TP PIO.
 */
2973 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2975 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2976 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2980 * t4_write_rss_pf_config - write PF RSS Configuration Table
2981 * @adapter: the adapter
2982 * @index: the entry in the VF RSS table to read
2983 * @val: the value to store
2985 * Writes the PF RSS Configuration Table at the specified index with the
/*
 * t4_write_rss_pf_config - write one PF RSS Configuration Table entry
 * (A_TP_RSS_PF0_CONFIG + @index) via TP PIO.
 */
2988 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2990 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2991 &val, 1, A_TP_RSS_PF0_CONFIG + index);
2995 * t4_read_rss_vf_config - read VF RSS Configuration Table
2996 * @adapter: the adapter
2997 * @index: the entry in the VF RSS table to read
2998 * @vfl: where to store the returned VFL
2999 * @vfh: where to store the returned VFH
3001 * Reads the VF RSS Configuration Table at the specified index and returns
3002 * the (VFL, VFH) values found there.
/*
 * t4_read_rss_vf_config - read one VF RSS Configuration Table entry.
 * Programs A_TP_RSS_CONFIG_VRT to latch entry @index into the VFL/VFH
 * shadow registers (clearing write-enable bits, setting VFRDEN), then
 * reads VFL and VFH back through TP PIO.
 */
3004 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3010 * Request that the index'th VF Table values be read into VFL/VFH.
3012 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3013 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
3014 vrt |= V_VFWRADDR(index) | F_VFRDEN;
3015 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3018 * Grab the VFL/VFH values ...
3020 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3021 vfl, 1, A_TP_RSS_VFL_CONFIG);
3022 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3023 vfh, 1, A_TP_RSS_VFH_CONFIG);
3027 * t4_write_rss_vf_config - write VF RSS Configuration Table
3029 * @adapter: the adapter
3030 * @index: the entry in the VF RSS table to write
3031 * @vfl: the VFL to store
3032 * @vfh: the VFH to store
3034 * Writes the VF RSS Configuration Table at the specified index with the
3035 * specified (VFL, VFH) values.
/*
 * t4_write_rss_vf_config - write one VF RSS Configuration Table entry.
 * Mirror of t4_read_rss_vf_config(): loads VFL/VFH through TP PIO first,
 * then commits them to entry @index via A_TP_RSS_CONFIG_VRT (clearing
 * read-enable bits, setting VFWREN).
 */
3037 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3043 * Load up VFL/VFH with the values to be written ...
3045 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3046 &vfl, 1, A_TP_RSS_VFL_CONFIG);
3047 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3048 &vfh, 1, A_TP_RSS_VFH_CONFIG);
3051 * Write the VFL/VFH into the VF Table at index'th location.
3053 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3054 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3055 vrt |= V_VFWRADDR(index) | F_VFWREN;
3056 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3060 * t4_read_rss_pf_map - read PF RSS Map
3061 * @adapter: the adapter
3063 * Reads the PF RSS Map register and returns its value.
/*
 * t4_read_rss_pf_map - read and return the PF RSS Map register
 * (A_TP_RSS_PF_MAP) via TP PIO.
 */
3065 u32 t4_read_rss_pf_map(struct adapter *adapter)
3069 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3070 &pfmap, 1, A_TP_RSS_PF_MAP);
3075 * t4_write_rss_pf_map - write PF RSS Map
3076 * @adapter: the adapter
3077 * @pfmap: PF RSS Map value
3079 * Writes the specified value to the PF RSS Map register.
/*
 * t4_write_rss_pf_map - write the PF RSS Map register (A_TP_RSS_PF_MAP)
 * via TP PIO.
 */
3081 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3083 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3084 &pfmap, 1, A_TP_RSS_PF_MAP);
3088 * t4_read_rss_pf_mask - read PF RSS Mask
3089 * @adapter: the adapter
3091 * Reads the PF RSS Mask register and returns its value.
/*
 * t4_read_rss_pf_mask - read and return the PF RSS Mask register
 * (A_TP_RSS_PF_MSK) via TP PIO.
 */
3093 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3097 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3098 &pfmask, 1, A_TP_RSS_PF_MSK);
3103 * t4_write_rss_pf_mask - write PF RSS Mask
3104 * @adapter: the adapter
3105 * @pfmask: PF RSS Mask value
3107 * Writes the specified value to the PF RSS Mask register.
/*
 * t4_write_rss_pf_mask - write the PF RSS Mask register (A_TP_RSS_PF_MSK)
 * via TP PIO.
 */
3109 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3111 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3112 &pfmask, 1, A_TP_RSS_PF_MSK);
/*
 * refresh_vlan_pri_map - re-read TP_VLAN_PRI_MAP into the adapter's cached
 * TP parameters and recompute the Compressed Filter Tuple field shifts
 * (VLAN, VNIC_ID, PORT, PROTOCOL) that depend on it.
 */
3115 static void refresh_vlan_pri_map(struct adapter *adap)
3118 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3119 &adap->params.tp.vlan_pri_map, 1,
3123 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3124 * shift positions of several elements of the Compressed Filter Tuple
3125 * for this adapter which we need frequently ...
3127 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3128 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3129 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3130 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
3133 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3134 * represents the presense of an Outer VLAN instead of a VNIC ID.
3136 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
/* -1 marks the VNIC shift as unavailable in that case */
3137 adap->params.tp.vnic_shift = -1;
3141 * t4_set_filter_mode - configure the optional components of filter tuples
3142 * @adap: the adapter
3143 * @mode_map: a bitmap selcting which optional filter components to enable
3145 * Sets the filter mode by selecting the optional components to enable
3146 * in filter tuples. Returns 0 on success and a negative error if the
3147 * requested mode needs more bits than are available for optional
/*
 * t4_set_filter_mode - select optional filter-tuple components.
 * Sums the bit widths of the requested components (width[] is indexed by
 * component, S_FCOE..S_FRAGMENTATION) and rejects modes exceeding
 * FILTER_OPT_LEN; otherwise writes the mode and refreshes the cached
 * VLAN_PRI_MAP-derived shifts. Return value on the reject path is
 * outside this view.
 */
3150 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3152 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3156 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3157 if (mode_map & (1 << i))
3159 if (nbits > FILTER_OPT_LEN)
3161 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3163 refresh_vlan_pri_map(adap);
3169 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
3170 * @adap: the adapter
3171 * @v4: holds the TCP/IP counter values
3172 * @v6: holds the TCP/IPv6 counter values
3174 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3175 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
/*
 * t4_tp_get_tcp_stats - read TP's TCP/IP (v4) and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be NULL to skip that family. Counters are read as
 * a contiguous MIB block and unpacked via the STAT/STAT64 index macros;
 * 64-bit values are assembled from HI/LO register pairs.
 */
3177 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3178 struct tp_tcp_stats *v6)
3180 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3182 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3183 #define STAT(x) val[STAT_IDX(x)]
3184 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3187 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3188 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3189 v4->tcpOutRsts = STAT(OUT_RST);
3190 v4->tcpInSegs = STAT64(IN_SEG);
3191 v4->tcpOutSegs = STAT64(OUT_SEG);
3192 v4->tcpRetransSegs = STAT64(RXT_SEG);
/* v6 block is laid out identically, starting at the V6 base register */
3195 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3196 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3197 v6->tcpOutRsts = STAT(OUT_RST);
3198 v6->tcpInSegs = STAT64(IN_SEG);
3199 v6->tcpOutSegs = STAT64(OUT_SEG);
3200 v6->tcpRetransSegs = STAT64(RXT_SEG);
3208 * t4_tp_get_err_stats - read TP's error MIB counters
3209 * @adap: the adapter
3210 * @st: holds the counter values
3212 * Returns the values of TP's error counters.
/*
 * t4_tp_get_err_stats - read TP's error MIB counters into @st: per-channel
 * MAC input errors, tunnel congestion/Tx drops, offload VLAN drops, TCPv6
 * input errors, and offload ARP drops, all via the TP MIB indirect
 * interface.
 */
3214 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3216 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3217 12, A_TP_MIB_MAC_IN_ERR_0);
3218 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3219 8, A_TP_MIB_TNL_CNG_DROP_0);
3220 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3221 4, A_TP_MIB_TNL_DROP_0);
3222 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3223 4, A_TP_MIB_OFD_VLN_DROP_0);
3224 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3225 4, A_TP_MIB_TCP_V6IN_ERR_0);
3226 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3227 2, A_TP_MIB_OFD_ARP_DROP);
3231 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
3232 * @adap: the adapter
3233 * @st: holds the counter values
3235 * Returns the values of TP's proxy counters.
/*
 * t4_tp_get_proxy_stats - read four TP proxy MIB counters (starting at
 * A_TP_MIB_TNL_LPBK_0) into @st->proxy.
 */
3237 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3239 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3240 4, A_TP_MIB_TNL_LPBK_0);
3244 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
3245 * @adap: the adapter
3246 * @st: holds the counter values
3248 * Returns the values of TP's CPL counters.
/*
 * t4_tp_get_cpl_stats - read eight TP CPL MIB counters (starting at
 * A_TP_MIB_CPL_IN_REQ_0) into @st->req.
 */
3250 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3252 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3253 8, A_TP_MIB_CPL_IN_REQ_0);
3257 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3258 * @adap: the adapter
3259 * @st: holds the counter values
3261 * Returns the values of TP's RDMA counters.
/*
 * t4_tp_get_rdma_stats - read two TP RDMA MIB counters (starting at
 * A_TP_MIB_RQE_DFR_MOD) into @st, beginning at rqe_dfr_mod.
 */
3263 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3265 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3266 2, A_TP_MIB_RQE_DFR_MOD);
3270 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3271 * @adap: the adapter
3272 * @idx: the port index
3273 * @st: holds the counter values
3275 * Returns the values of TP's FCoE counters for the selected port.
/*
 * t4_get_fcoe_stats - read per-port FCoE MIB counters: DDPed frames,
 * dropped frames, and DDPed octets (HI/LO register pair combined into a
 * 64-bit value). Register offsets are indexed by @idx (byte counters use
 * two registers per port, hence 2 * idx).
 */
3277 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3278 struct tp_fcoe_stats *st)
3282 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3283 1, A_TP_MIB_FCOE_DDP_0 + idx);
3284 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3285 1, A_TP_MIB_FCOE_DROP_0 + idx);
3286 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3287 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3288 st->octetsDDP = ((u64)val[0] << 32) | val[1];
3292 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3293 * @adap: the adapter
3294 * @st: holds the counter values
3296 * Returns the values of TP's counters for non-TCP directly-placed packets.
/*
 * t4_get_usm_stats - read TP's non-TCP directly-placed (USM) MIB counters:
 * frames (val[0]) and a 64-bit octet count from val[2]/val[3]. val[1]'s
 * use (presumably drops) is not visible in this chunk -- confirm.
 */
3298 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3302 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3304 st->frames = val[0];
3306 st->octets = ((u64)val[2] << 32) | val[3];
3310 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3311 * @adap: the adapter
3312 * @mtus: where to store the MTU values
3313 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3315 * Reads the HW path MTU table.
/*
 * t4_read_mtu_tbl - read the NMTUS-entry HW path-MTU table.
 * Writing index 0xff with the entry number triggers a read-back; the MTU
 * value and (optionally, when @mtu_log is non-NULL) its base-2 log width
 * are extracted from the returned register value.
 */
3317 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3322 for (i = 0; i < NMTUS; ++i) {
3323 t4_write_reg(adap, A_TP_MTU_TABLE,
3324 V_MTUINDEX(0xff) | V_MTUVALUE(i));
3325 v = t4_read_reg(adap, A_TP_MTU_TABLE);
3326 mtus[i] = G_MTUVALUE(v);
3328 mtu_log[i] = G_MTUWIDTH(v);
3333 * t4_read_cong_tbl - reads the congestion control table
3334 * @adap: the adapter
3335 * @incr: where to store the alpha values
3337 * Reads the additive increments programmed into the HW congestion
3340 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3342 unsigned int mtu, w;
3344 for (mtu = 0; mtu < NMTUS; ++mtu)
3345 for (w = 0; w < NCCTRL_WIN; ++w) {
3346 t4_write_reg(adap, A_TP_CCTRL_TABLE,
3347 V_ROWINDEX(0xffff) | (mtu << 5) | w);
3348 incr[mtu][w] = (u16)t4_read_reg(adap,
3349 A_TP_CCTRL_TABLE) & 0x1fff;
3354 * t4_read_pace_tbl - read the pace table
3355 * @adap: the adapter
3356 * @pace_vals: holds the returned values
3358 * Returns the values of TP's pace table in microseconds.
3360 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3364 for (i = 0; i < NTX_SCHED; i++) {
3365 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3366 v = t4_read_reg(adap, A_TP_PACE_TABLE);
3367 pace_vals[i] = dack_ticks_to_usec(adap, v);
3372 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3373 * @adap: the adapter
3374 * @addr: the indirect TP register address
3375 * @mask: specifies the field within the register to modify
3376 * @val: new value for the field
3378 * Sets a field of an indirect TP register to the given value.
3380 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3381 unsigned int mask, unsigned int val)
3383 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3384 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3385 t4_write_reg(adap, A_TP_PIO_DATA, val);
3389 * init_cong_ctrl - initialize congestion control parameters
3390 * @a: the alpha values for congestion control
3391 * @b: the beta values for congestion control
3393 * Initialize the congestion control parameters.
3395 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3397 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3422 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3425 b[13] = b[14] = b[15] = b[16] = 3;
3426 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3427 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3432 /* The minimum additive increment value for the congestion control table */
3433 #define CC_MIN_INCR 2U
3436 * t4_load_mtus - write the MTU and congestion control HW tables
3437 * @adap: the adapter
3438 * @mtus: the values for the MTU table
3439 * @alpha: the values for the congestion control alpha parameter
3440 * @beta: the values for the congestion control beta parameter
3442 * Write the HW MTU table with the supplied MTUs and the high-speed
3443 * congestion control table with the supplied alpha, beta, and MTUs.
3444 * We write the two tables together because the additive increments
3445 * depend on the MTUs.
3447 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3448 const unsigned short *alpha, const unsigned short *beta)
3450 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3451 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3452 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3453 28672, 40960, 57344, 81920, 114688, 163840, 229376
3458 for (i = 0; i < NMTUS; ++i) {
3459 unsigned int mtu = mtus[i];
3460 unsigned int log2 = fls(mtu);
3462 if (!(mtu & ((1 << log2) >> 2))) /* round */
3464 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3465 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3467 for (w = 0; w < NCCTRL_WIN; ++w) {
3470 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3473 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3474 (w << 16) | (beta[w] << 13) | inc);
3480 * t4_set_pace_tbl - set the pace table
3481 * @adap: the adapter
3482 * @pace_vals: the pace values in microseconds
3483 * @start: index of the first entry in the HW pace table to set
3484 * @n: how many entries to set
3486 * Sets (a subset of the) HW pace table.
3488 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3489 unsigned int start, unsigned int n)
3491 unsigned int vals[NTX_SCHED], i;
3492 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3497 /* convert values from us to dack ticks, rounding to closest value */
3498 for (i = 0; i < n; i++, pace_vals++) {
3499 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3500 if (vals[i] > 0x7ff)
3502 if (*pace_vals && vals[i] == 0)
3505 for (i = 0; i < n; i++, start++)
3506 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3511 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3512 * @adap: the adapter
3513 * @kbps: target rate in Kbps
3514 * @sched: the scheduler index
3516 * Configure a Tx HW scheduler for the target rate.
3518 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3520 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3521 unsigned int clk = adap->params.vpd.cclk * 1000;
3522 unsigned int selected_cpt = 0, selected_bpt = 0;
3525 kbps *= 125; /* -> bytes */
3526 for (cpt = 1; cpt <= 255; cpt++) {
3528 bpt = (kbps + tps / 2) / tps;
3529 if (bpt > 0 && bpt <= 255) {
3531 delta = v >= kbps ? v - kbps : kbps - v;
3532 if (delta < mindelta) {
3537 } else if (selected_cpt)
3543 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3544 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3545 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3547 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3549 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3550 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3555 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3556 * @adap: the adapter
3557 * @sched: the scheduler index
3558 * @ipg: the interpacket delay in tenths of nanoseconds
3560 * Set the interpacket delay for a HW packet rate scheduler.
3562 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3564 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3566 /* convert ipg to nearest number of core clocks */
3567 ipg *= core_ticks_per_usec(adap);
3568 ipg = (ipg + 5000) / 10000;
3569 if (ipg > M_TXTIMERSEPQ0)
3572 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3573 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3575 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3577 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3578 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3579 t4_read_reg(adap, A_TP_TM_PIO_DATA);
3584 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3585 * @adap: the adapter
3586 * @sched: the scheduler index
3587 * @kbps: the byte rate in Kbps
3588 * @ipg: the interpacket delay in tenths of nanoseconds
3590 * Return the current configuration of a HW Tx scheduler.
3592 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3595 unsigned int v, addr, bpt, cpt;
3598 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3599 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3600 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3603 bpt = (v >> 8) & 0xff;
3606 *kbps = 0; /* scheduler disabled */
3608 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3609 *kbps = (v * bpt) / 125;
3613 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3614 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3615 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3619 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3624 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3625 * clocks. The formula is
3627 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3629 * which is equivalent to
3631 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3633 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3635 u64 v = bytes256 * adap->params.vpd.cclk;
3637 return v * 62 + v / 2;
3641 * t4_get_chan_txrate - get the current per channel Tx rates
3642 * @adap: the adapter
3643 * @nic_rate: rates for NIC traffic
3644 * @ofld_rate: rates for offloaded traffic
3646 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3649 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3653 v = t4_read_reg(adap, A_TP_TX_TRATE);
3654 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3655 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3656 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3657 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3659 v = t4_read_reg(adap, A_TP_TX_ORATE);
3660 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3661 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3662 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3663 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3667 * t4_set_trace_filter - configure one of the tracing filters
3668 * @adap: the adapter
3669 * @tp: the desired trace filter parameters
3670 * @idx: which filter to configure
3671 * @enable: whether to enable or disable the filter
3673 * Configures one of the tracing filters available in HW. If @tp is %NULL
3674 * it indicates that the filter is already written in the register and it
3675 * just needs to be enabled or disabled.
3677 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3678 int idx, int enable)
3680 int i, ofst = idx * 4;
3681 u32 data_reg, mask_reg, cfg;
3682 u32 multitrc = F_TRCMULTIFILTER;
3683 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3685 if (idx < 0 || idx >= NTRACE)
3688 if (tp == NULL || !enable) {
3689 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3695 * TODO - After T4 data book is updated, specify the exact
3698 * See T4 data book - MPS section for a complete description
3699 * of the below if..else handling of A_MPS_TRC_CFG register
3702 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3703 if (cfg & F_TRCMULTIFILTER) {
3705 * If multiple tracers are enabled, then maximum
3706 * capture size is 2.5KB (FIFO size of a single channel)
3707 * minus 2 flits for CPL_TRACE_PKT header.
3709 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3713 * If multiple tracers are disabled, to avoid deadlocks
3714 * maximum packet capture size of 9600 bytes is recommended.
3715 * Also in this mode, only trace0 can be enabled and running.
3718 if (tp->snap_len > 9600 || idx)
3722 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3723 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3724 tp->min_len > M_TFMINPKTSIZE)
3727 /* stop the tracer we'll be changing */
3728 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3730 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3731 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3732 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3734 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3735 t4_write_reg(adap, data_reg, tp->data[i]);
3736 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3738 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3739 V_TFCAPTUREMAX(tp->snap_len) |
3740 V_TFMINPKTSIZE(tp->min_len));
3741 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3742 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3744 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3745 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3751 * t4_get_trace_filter - query one of the tracing filters
3752 * @adap: the adapter
3753 * @tp: the current trace filter parameters
3754 * @idx: which trace filter to query
3755 * @enabled: non-zero if the filter is enabled
3757 * Returns the current settings of one of the HW tracing filters.
3759 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3763 int i, ofst = idx * 4;
3764 u32 data_reg, mask_reg;
3766 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3767 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3770 *enabled = !!(ctla & F_TFEN);
3771 tp->port = G_TFPORT(ctla);
3772 tp->invert = !!(ctla & F_TFINVERTMATCH);
3774 *enabled = !!(ctla & F_T5_TFEN);
3775 tp->port = G_T5_TFPORT(ctla);
3776 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3778 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3779 tp->min_len = G_TFMINPKTSIZE(ctlb);
3780 tp->skip_ofst = G_TFOFFSET(ctla);
3781 tp->skip_len = G_TFLENGTH(ctla);
3783 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3784 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3785 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3787 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3788 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3789 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3794 * t4_pmtx_get_stats - returns the HW stats from PMTX
3795 * @adap: the adapter
3796 * @cnt: where to store the count statistics
3797 * @cycles: where to store the cycle statistics
3799 * Returns performance statistics from PMTX.
3801 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3806 for (i = 0; i < PM_NSTATS; i++) {
3807 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3808 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3810 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3812 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3813 A_PM_TX_DBG_DATA, data, 2,
3814 A_PM_TX_DBG_STAT_MSB);
3815 cycles[i] = (((u64)data[0] << 32) | data[1]);
3821 * t4_pmrx_get_stats - returns the HW stats from PMRX
3822 * @adap: the adapter
3823 * @cnt: where to store the count statistics
3824 * @cycles: where to store the cycle statistics
3826 * Returns performance statistics from PMRX.
3828 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3833 for (i = 0; i < PM_NSTATS; i++) {
3834 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3835 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3837 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3839 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3840 A_PM_RX_DBG_DATA, data, 2,
3841 A_PM_RX_DBG_STAT_MSB);
3842 cycles[i] = (((u64)data[0] << 32) | data[1]);
3848 * get_mps_bg_map - return the buffer groups associated with a port
3849 * @adap: the adapter
3850 * @idx: the port index
3852 * Returns a bitmap indicating which MPS buffer groups are associated
3853 * with the given port. Bit i is set if buffer group i is used by the
3856 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3858 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3861 return idx == 0 ? 0xf : 0;
3863 return idx < 2 ? (3 << (2 * idx)) : 0;
3868 * t4_get_port_stats_offset - collect port stats relative to a previous
3870 * @adap: The adapter
3872 * @stats: Current stats to fill
3873 * @offset: Previous stats snapshot
3875 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3876 struct port_stats *stats,
3877 struct port_stats *offset)
3882 t4_get_port_stats(adap, idx, stats);
3883 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3884 i < (sizeof(struct port_stats)/sizeof(u64)) ;
3890 * t4_get_port_stats - collect port statistics
3891 * @adap: the adapter
3892 * @idx: the port index
3893 * @p: the stats structure to fill
3895 * Collect statistics related to the given port from HW.
3897 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3899 u32 bgmap = get_mps_bg_map(adap, idx);
3901 #define GET_STAT(name) \
3902 t4_read_reg64(adap, \
3903 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3904 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3905 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3907 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3908 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3909 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3910 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3911 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3912 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3913 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3914 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3915 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3916 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3917 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3918 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3919 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3920 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3921 p->tx_drop = GET_STAT(TX_PORT_DROP);
3922 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3923 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3924 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3925 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3926 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3927 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3928 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3929 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3931 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3932 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3933 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3934 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3935 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3936 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3937 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3938 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3939 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3940 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3941 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3942 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3943 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3944 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3945 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3946 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3947 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3948 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3949 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3950 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3951 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3952 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3953 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3954 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3955 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3956 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3957 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3959 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3960 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3961 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3962 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3963 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3964 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3965 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3966 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3973 * t4_clr_port_stats - clear port statistics
3974 * @adap: the adapter
3975 * @idx: the port index
3977 * Clear HW statistics for the given port.
3979 void t4_clr_port_stats(struct adapter *adap, int idx)
3982 u32 bgmap = get_mps_bg_map(adap, idx);
3986 port_base_addr = PORT_BASE(idx);
3988 port_base_addr = T5_PORT_BASE(idx);
3990 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3991 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3992 t4_write_reg(adap, port_base_addr + i, 0);
3993 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3994 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3995 t4_write_reg(adap, port_base_addr + i, 0);
3996 for (i = 0; i < 4; i++)
3997 if (bgmap & (1 << i)) {
3999 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
4001 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
4006 * t4_get_lb_stats - collect loopback port statistics
4007 * @adap: the adapter
4008 * @idx: the loopback port index
4009 * @p: the stats structure to fill
4011 * Return HW statistics for the given loopback port.
4013 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4015 u32 bgmap = get_mps_bg_map(adap, idx);
4017 #define GET_STAT(name) \
4018 t4_read_reg64(adap, \
4020 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
4021 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
4022 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
4024 p->octets = GET_STAT(BYTES);
4025 p->frames = GET_STAT(FRAMES);
4026 p->bcast_frames = GET_STAT(BCAST);
4027 p->mcast_frames = GET_STAT(MCAST);
4028 p->ucast_frames = GET_STAT(UCAST);
4029 p->error_frames = GET_STAT(ERROR);
4031 p->frames_64 = GET_STAT(64B);
4032 p->frames_65_127 = GET_STAT(65B_127B);
4033 p->frames_128_255 = GET_STAT(128B_255B);
4034 p->frames_256_511 = GET_STAT(256B_511B);
4035 p->frames_512_1023 = GET_STAT(512B_1023B);
4036 p->frames_1024_1518 = GET_STAT(1024B_1518B);
4037 p->frames_1519_max = GET_STAT(1519B_MAX);
4038 p->drop = GET_STAT(DROP_FRAMES);
4040 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4041 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4042 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4043 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4044 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4045 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4046 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4047 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4054 * t4_wol_magic_enable - enable/disable magic packet WoL
4055 * @adap: the adapter
4056 * @port: the physical port index
4057 * @addr: MAC address expected in magic packets, %NULL to disable
4059 * Enables/disables magic packet wake-on-LAN for the selected port.
4061 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4064 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4067 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4068 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4069 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4071 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4072 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4073 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4077 t4_write_reg(adap, mag_id_reg_l,
4078 (addr[2] << 24) | (addr[3] << 16) |
4079 (addr[4] << 8) | addr[5]);
4080 t4_write_reg(adap, mag_id_reg_h,
4081 (addr[0] << 8) | addr[1]);
4083 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4084 V_MAGICEN(addr != NULL));
4088 * t4_wol_pat_enable - enable/disable pattern-based WoL
4089 * @adap: the adapter
4090 * @port: the physical port index
4091 * @map: bitmap of which HW pattern filters to set
4092 * @mask0: byte mask for bytes 0-63 of a packet
4093 * @mask1: byte mask for bytes 64-127 of a packet
4094 * @crc: Ethernet CRC for selected bytes
4095 * @enable: enable/disable switch
4097 * Sets the pattern filters indicated in @map to mask out the bytes
4098 * specified in @mask0/@mask1 in received packets and compare the CRC of
4099 * the resulting packet against @crc. If @enable is %true pattern-based
4100 * WoL is enabled, otherwise disabled.
4102 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4103 u64 mask0, u64 mask1, unsigned int crc, bool enable)
4109 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4111 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4114 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4120 #define EPIO_REG(name) \
4121 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4122 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4124 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4125 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4126 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4128 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4132 /* write byte masks */
4133 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4134 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4135 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
4136 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4140 t4_write_reg(adap, EPIO_REG(DATA0), crc);
4141 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4142 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
4143 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4148 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4153 * t4_mk_filtdelwr - create a delete filter WR
4154 * @ftid: the filter ID
4155 * @wr: the filter work request to populate
4156 * @qid: ingress queue to receive the delete notification
4158 * Creates a filter work request to delete the supplied filter. If @qid is
4159 * negative the delete notification is suppressed.
4161 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4163 memset(wr, 0, sizeof(*wr));
4164 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4165 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4166 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4167 V_FW_FILTER_WR_NOREPLY(qid < 0));
4168 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4170 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4173 #define INIT_CMD(var, cmd, rd_wr) do { \
4174 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4175 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4176 (var).retval_len16 = htonl(FW_LEN16(var)); \
4179 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4181 struct fw_ldst_cmd c;
4183 memset(&c, 0, sizeof(c));
4184 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4185 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4186 c.cycles_to_len16 = htonl(FW_LEN16(c));
4187 c.u.addrval.addr = htonl(addr);
4188 c.u.addrval.val = htonl(val);
4190 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4194 * t4_mdio_rd - read a PHY register through MDIO
4195 * @adap: the adapter
4196 * @mbox: mailbox to use for the FW command
4197 * @phy_addr: the PHY address
4198 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4199 * @reg: the register to read
4200 * @valp: where to store the value
4202 * Issues a FW command through the given mailbox to read a PHY register.
4204 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4205 unsigned int mmd, unsigned int reg, unsigned int *valp)
4208 struct fw_ldst_cmd c;
4210 memset(&c, 0, sizeof(c));
4211 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4212 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4213 c.cycles_to_len16 = htonl(FW_LEN16(c));
4214 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4215 V_FW_LDST_CMD_MMD(mmd));
4216 c.u.mdio.raddr = htons(reg);
4218 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4220 *valp = ntohs(c.u.mdio.rval);
4225 * t4_mdio_wr - write a PHY register through MDIO
4226 * @adap: the adapter
4227 * @mbox: mailbox to use for the FW command
4228 * @phy_addr: the PHY address
4229 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4230 * @reg: the register to write
4231 * @valp: value to write
4233 * Issues a FW command through the given mailbox to write a PHY register.
4235 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4236 unsigned int mmd, unsigned int reg, unsigned int val)
4238 struct fw_ldst_cmd c;
4240 memset(&c, 0, sizeof(c));
4241 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4242 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4243 c.cycles_to_len16 = htonl(FW_LEN16(c));
4244 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4245 V_FW_LDST_CMD_MMD(mmd));
4246 c.u.mdio.raddr = htons(reg);
4247 c.u.mdio.rval = htons(val);
4249 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4253 * t4_i2c_rd - read I2C data from adapter
4254 * @adap: the adapter
4255 * @port: Port number if per-port device; <0 if not
4256 * @devid: per-port device ID or absolute device ID
4257 * @offset: byte offset into device I2C space
4258 * @len: byte length of I2C space data
4259 * @buf: buffer in which to return I2C data
4261 * Reads the I2C data from the indicated device and location.
4263 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4264 int port, unsigned int devid,
4265 unsigned int offset, unsigned int len,
4268 struct fw_ldst_cmd ldst;
4274 len > sizeof ldst.u.i2c.data)
4277 memset(&ldst, 0, sizeof ldst);
4278 ldst.op_to_addrspace =
4279 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4282 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4283 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4284 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4285 ldst.u.i2c.did = devid;
4286 ldst.u.i2c.boffset = offset;
4287 ldst.u.i2c.blen = len;
4288 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4290 memcpy(buf, ldst.u.i2c.data, len);
4295 * t4_i2c_wr - write I2C data to adapter
4296 * @adap: the adapter
4297 * @port: Port number if per-port device; <0 if not
4298 * @devid: per-port device ID or absolute device ID
4299 * @offset: byte offset into device I2C space
4300 * @len: byte length of I2C space data
4301 * @buf: buffer containing new I2C data
4303 * Write the I2C data to the indicated device and location.
4305 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
4306 int port, unsigned int devid,
4307 unsigned int offset, unsigned int len,
4310 struct fw_ldst_cmd ldst;
4315 len > sizeof ldst.u.i2c.data)
4318 memset(&ldst, 0, sizeof ldst);
4319 ldst.op_to_addrspace =
4320 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4323 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4324 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4325 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4326 ldst.u.i2c.did = devid;
4327 ldst.u.i2c.boffset = offset;
4328 ldst.u.i2c.blen = len;
4329 memcpy(ldst.u.i2c.data, buf, len);
4330 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4334 * t4_sge_ctxt_flush - flush the SGE context cache
4335 * @adap: the adapter
4336 * @mbox: mailbox to use for the FW command
4338 * Issues a FW command through the given mailbox to flush the
4339 * SGE context cache.
4341 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4344 struct fw_ldst_cmd c;
4346 memset(&c, 0, sizeof(c));
4347 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4349 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4350 c.cycles_to_len16 = htonl(FW_LEN16(c));
4351 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4353 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4358 * t4_sge_ctxt_rd - read an SGE context through FW
4359 * @adap: the adapter
4360 * @mbox: mailbox to use for the FW command
4361 * @cid: the context id
4362 * @ctype: the context type
4363 * @data: where to store the context data
4365 * Issues a FW command through the given mailbox to read an SGE context.
4367 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4368 enum ctxt_type ctype, u32 *data)
4371 struct fw_ldst_cmd c;
4373 if (ctype == CTXT_EGRESS)
4374 ret = FW_LDST_ADDRSPC_SGE_EGRC;
4375 else if (ctype == CTXT_INGRESS)
4376 ret = FW_LDST_ADDRSPC_SGE_INGC;
4377 else if (ctype == CTXT_FLM)
4378 ret = FW_LDST_ADDRSPC_SGE_FLMC;
4380 ret = FW_LDST_ADDRSPC_SGE_CONMC;
4382 memset(&c, 0, sizeof(c));
4383 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4384 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4385 c.cycles_to_len16 = htonl(FW_LEN16(c));
4386 c.u.idctxt.physid = htonl(cid);
4388 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4390 data[0] = ntohl(c.u.idctxt.ctxt_data0);
4391 data[1] = ntohl(c.u.idctxt.ctxt_data1);
4392 data[2] = ntohl(c.u.idctxt.ctxt_data2);
4393 data[3] = ntohl(c.u.idctxt.ctxt_data3);
4394 data[4] = ntohl(c.u.idctxt.ctxt_data4);
4395 data[5] = ntohl(c.u.idctxt.ctxt_data5);
4401 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4402 * @adap: the adapter
4403 * @cid: the context id
4404 * @ctype: the context type
4405 * @data: where to store the context data
4407 * Reads an SGE context directly, bypassing FW. This is only for
4408 * debugging when FW is unavailable.
4410 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4415 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4416 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4418 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4419 *data++ = t4_read_reg(adap, i);
4424 * t4_fw_hello - establish communication with FW
4425 * @adap: the adapter
4426 * @mbox: mailbox to use for the FW command
4427 * @evt_mbox: mailbox to receive async FW events
4428 * @master: specifies the caller's willingness to be the device master
4429 * @state: returns the current device state (if non-NULL)
4431 * Issues a command to establish communication with FW. Returns either
4432 * an error (negative integer) or the mailbox of the Master PF.
4434 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4435 enum dev_master master, enum dev_state *state)
4438 struct fw_hello_cmd c;
4440 unsigned int master_mbox;
4441 int retries = FW_CMD_HELLO_RETRIES;
4444 memset(&c, 0, sizeof(c));
4445 INIT_CMD(c, HELLO, WRITE);
4446 c.err_to_clearinit = htonl(
4447 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4448 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4449 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4450 M_FW_HELLO_CMD_MBMASTER) |
4451 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4452 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4453 F_FW_HELLO_CMD_CLEARINIT);
4456 * Issue the HELLO command to the firmware. If it's not successful
4457 * but indicates that we got a "busy" or "timeout" condition, retry
4458 * the HELLO until we exhaust our retry limit. If we do exceed our
4459 * retry limit, check to see if the firmware left us any error
4460 * information and report that if so ...
4462 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4463 if (ret != FW_SUCCESS) {
4464 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4466 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4467 t4_report_fw_error(adap);
4471 v = ntohl(c.err_to_clearinit);
4472 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4474 if (v & F_FW_HELLO_CMD_ERR)
4475 *state = DEV_STATE_ERR;
4476 else if (v & F_FW_HELLO_CMD_INIT)
4477 *state = DEV_STATE_INIT;
4479 *state = DEV_STATE_UNINIT;
4483 * If we're not the Master PF then we need to wait around for the
4484 * Master PF Driver to finish setting up the adapter.
4486 * Note that we also do this wait if we're a non-Master-capable PF and
4487 * there is no current Master PF; a Master PF may show up momentarily
4488 * and we wouldn't want to fail pointlessly. (This can happen when an
4489 * OS loads lots of different drivers rapidly at the same time). In
4490 * this case, the Master PF returned by the firmware will be
4491 * M_PCIE_FW_MASTER so the test below will work ...
4493 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4494 master_mbox != mbox) {
4495 int waiting = FW_CMD_HELLO_TIMEOUT;
4498 * Wait for the firmware to either indicate an error or
4499 * initialized state. If we see either of these we bail out
4500 * and report the issue to the caller. If we exhaust the
4501 * "hello timeout" and we haven't exhausted our retries, try
4502 * again. Otherwise bail with a timeout error.
4511 * If neither Error nor Initialialized are indicated
4512 * by the firmware keep waiting till we exhaust our
4513 * timeout ... and then retry if we haven't exhausted
4516 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4517 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4528 * We either have an Error or Initialized condition
4529 * report errors preferentially.
4532 if (pcie_fw & F_PCIE_FW_ERR)
4533 *state = DEV_STATE_ERR;
4534 else if (pcie_fw & F_PCIE_FW_INIT)
4535 *state = DEV_STATE_INIT;
4539 * If we arrived before a Master PF was selected and
4540 * there's not a valid Master PF, grab its identity
4543 if (master_mbox == M_PCIE_FW_MASTER &&
4544 (pcie_fw & F_PCIE_FW_MASTER_VLD))
4545 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4554 * t4_fw_bye - end communication with FW
4555 * @adap: the adapter
4556 * @mbox: mailbox to use for the FW command
4558 * Issues a command to terminate communication with FW.
4560 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4562 struct fw_bye_cmd c;
4564 memset(&c, 0, sizeof(c));
4565 INIT_CMD(c, BYE, WRITE);
4566 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4570 * t4_fw_reset - issue a reset to FW
4571 * @adap: the adapter
4572 * @mbox: mailbox to use for the FW command
4573 * @reset: specifies the type of reset to perform
4575 * Issues a reset command of the specified type to FW.
4577 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4579 struct fw_reset_cmd c;
4581 memset(&c, 0, sizeof(c));
4582 INIT_CMD(c, RESET, WRITE);
4583 c.val = htonl(reset);
4584 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4588 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4589 * @adap: the adapter
4590 * @mbox: mailbox to use for the FW RESET command (if desired)
4591 * @force: force uP into RESET even if FW RESET command fails
4593 * Issues a RESET command to firmware (if desired) with a HALT indication
4594 * and then puts the microprocessor into RESET state. The RESET command
4595 * will only be issued if a legitimate mailbox is provided (mbox <=
4596 * M_PCIE_FW_MASTER).
4598 * This is generally used in order for the host to safely manipulate the
4599 * adapter without fear of conflicting with whatever the firmware might
4600 * be doing. The only way out of this state is to RESTART the firmware
4603 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4608 * If a legitimate mailbox is provided, issue a RESET command
4609 * with a HALT indication.
4611 if (mbox <= M_PCIE_FW_MASTER) {
4612 struct fw_reset_cmd c;
4614 memset(&c, 0, sizeof(c));
4615 INIT_CMD(c, RESET, WRITE);
4616 c.val = htonl(F_PIORST | F_PIORSTMODE);
4617 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4618 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4622 * Normally we won't complete the operation if the firmware RESET
4623 * command fails but if our caller insists we'll go ahead and put the
4624 * uP into RESET. This can be useful if the firmware is hung or even
4625 * missing ... We'll have to take the risk of putting the uP into
4626 * RESET without the cooperation of firmware in that case.
4628 * We also force the firmware's HALT flag to be on in case we bypassed
4629 * the firmware RESET command above or we're dealing with old firmware
4630 * which doesn't have the HALT capability. This will serve as a flag
4631 * for the incoming firmware to know that it's coming out of a HALT
4632 * rather than a RESET ... if it's new enough to understand that ...
4634 if (ret == 0 || force) {
4635 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4636 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4640 * And we always return the result of the firmware RESET command
4641 * even when we force the uP into RESET ...
4647 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4648 * @adap: the adapter
4649 * @reset: if we want to do a RESET to restart things
4651 * Restart firmware previously halted by t4_fw_halt(). On successful
4652 * return the previous PF Master remains as the new PF Master and there
4653 * is no need to issue a new HELLO command, etc.
4655 * We do this in two ways:
4657 * 1. If we're dealing with newer firmware we'll simply want to take
4658 * the chip's microprocessor out of RESET. This will cause the
4659 * firmware to start up from its start vector. And then we'll loop
4660 * until the firmware indicates it's started again (PCIE_FW.HALT
4661 * reset to 0) or we timeout.
4663 * 2. If we're dealing with older firmware then we'll need to RESET
4664 * the chip since older firmware won't recognize the PCIE_FW.HALT
4665 * flag and automatically RESET itself on startup.
4667 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4671 * Since we're directing the RESET instead of the firmware
4672 * doing it automatically, we need to clear the PCIE_FW.HALT
4675 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4678 * If we've been given a valid mailbox, first try to get the
4679 * firmware to do the RESET. If that works, great and we can
4680 * return success. Otherwise, if we haven't been given a
4681 * valid mailbox or the RESET command failed, fall back to
4682 * hitting the chip with a hammer.
4684 if (mbox <= M_PCIE_FW_MASTER) {
4685 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4687 if (t4_fw_reset(adap, mbox,
4688 F_PIORST | F_PIORSTMODE) == 0)
4692 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4697 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4698 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4699 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4710 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4711 * @adap: the adapter
4712 * @mbox: mailbox to use for the FW RESET command (if desired)
4713 * @fw_data: the firmware image to write
4715 * @force: force upgrade even if firmware doesn't cooperate
4717 * Perform all of the steps necessary for upgrading an adapter's
4718 * firmware image. Normally this requires the cooperation of the
4719 * existing firmware in order to halt all existing activities
4720 * but if an invalid mailbox token is passed in we skip that step
4721 * (though we'll still put the adapter microprocessor into RESET in
4724 * On successful return the new firmware will have been loaded and
4725 * the adapter will have been fully RESET losing all previous setup
4726 * state. On unsuccessful return the adapter may be completely hosed ...
4727 * positive errno indicates that the adapter is ~probably~ intact, a
4728 * negative errno indicates that things are looking bad ...
4730 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4731 const u8 *fw_data, unsigned int size, int force)
4733 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4734 unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4738 ret = t4_fw_halt(adap, mbox, force);
4739 if (ret < 0 && !force)
4743 ret = t4_load_fw(adap, fw_data, size);
4744 if (ret < 0 || bootstrap)
4748 * Older versions of the firmware don't understand the new
4749 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4750 * restart. So for newly loaded older firmware we'll have to do the
4751 * RESET for it so it starts up on a clean slate. We can tell if
4752 * the newly loaded firmware will handle this right by checking
4753 * its header flags to see if it advertises the capability.
4755 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4756 return t4_fw_restart(adap, mbox, reset);
4760 * t4_fw_initialize - ask FW to initialize the device
4761 * @adap: the adapter
4762 * @mbox: mailbox to use for the FW command
4764 * Issues a command to FW to partially initialize the device. This
4765 * performs initialization that generally doesn't depend on user input.
4767 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4769 struct fw_initialize_cmd c;
4771 memset(&c, 0, sizeof(c));
4772 INIT_CMD(c, INITIALIZE, WRITE);
4773 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4777 * t4_query_params - query FW or device parameters
4778 * @adap: the adapter
4779 * @mbox: mailbox to use for the FW command
4782 * @nparams: the number of parameters
4783 * @params: the parameter names
4784 * @val: the parameter values
4786 * Reads the value of FW or device parameters. Up to 7 parameters can be
4789 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4790 unsigned int vf, unsigned int nparams, const u32 *params,
4794 struct fw_params_cmd c;
4795 __be32 *p = &c.param[0].mnem;
4800 memset(&c, 0, sizeof(c));
4801 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4802 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4803 V_FW_PARAMS_CMD_VFN(vf));
4804 c.retval_len16 = htonl(FW_LEN16(c));
4806 for (i = 0; i < nparams; i++, p += 2, params++)
4807 *p = htonl(*params);
4809 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4811 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4817 * t4_set_params - sets FW or device parameters
4818 * @adap: the adapter
4819 * @mbox: mailbox to use for the FW command
4822 * @nparams: the number of parameters
4823 * @params: the parameter names
4824 * @val: the parameter values
4826 * Sets the value of FW or device parameters. Up to 7 parameters can be
4827 * specified at once.
4829 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4830 unsigned int vf, unsigned int nparams, const u32 *params,
4833 struct fw_params_cmd c;
4834 __be32 *p = &c.param[0].mnem;
4839 memset(&c, 0, sizeof(c));
4840 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4841 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4842 V_FW_PARAMS_CMD_VFN(vf));
4843 c.retval_len16 = htonl(FW_LEN16(c));
4846 *p++ = htonl(*params);
4852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4856 * t4_cfg_pfvf - configure PF/VF resource limits
4857 * @adap: the adapter
4858 * @mbox: mailbox to use for the FW command
4859 * @pf: the PF being configured
4860 * @vf: the VF being configured
4861 * @txq: the max number of egress queues
4862 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4863 * @rxqi: the max number of interrupt-capable ingress queues
4864 * @rxq: the max number of interruptless ingress queues
4865 * @tc: the PCI traffic class
4866 * @vi: the max number of virtual interfaces
4867 * @cmask: the channel access rights mask for the PF/VF
4868 * @pmask: the port access rights mask for the PF/VF
4869 * @nexact: the maximum number of exact MPS filters
4870 * @rcaps: read capabilities
4871 * @wxcaps: write/execute capabilities
4873 * Configures resource limits and capabilities for a physical or virtual
4876 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4877 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4878 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4879 unsigned int vi, unsigned int cmask, unsigned int pmask,
4880 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4882 struct fw_pfvf_cmd c;
4884 memset(&c, 0, sizeof(c));
4885 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4886 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4887 V_FW_PFVF_CMD_VFN(vf));
4888 c.retval_len16 = htonl(FW_LEN16(c));
4889 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4890 V_FW_PFVF_CMD_NIQ(rxq));
4891 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4892 V_FW_PFVF_CMD_PMASK(pmask) |
4893 V_FW_PFVF_CMD_NEQ(txq));
4894 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4895 V_FW_PFVF_CMD_NEXACTF(nexact));
4896 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4897 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4898 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4899 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4903 * t4_alloc_vi_func - allocate a virtual interface
4904 * @adap: the adapter
4905 * @mbox: mailbox to use for the FW command
4906 * @port: physical port associated with the VI
4907 * @pf: the PF owning the VI
4908 * @vf: the VF owning the VI
4909 * @nmac: number of MAC addresses needed (1 to 5)
4910 * @mac: the MAC addresses of the VI
4911 * @rss_size: size of RSS table slice associated with this VI
4912 * @portfunc: which Port Application Function MAC Address is desired
4913 * @idstype: Intrusion Detection Type
4915 * Allocates a virtual interface for the given physical port. If @mac is
4916 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4917 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4918 * stored consecutively so the space needed is @nmac * 6 bytes.
4919 * Returns a negative error number or the non-negative VI id.
4921 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4922 unsigned int port, unsigned int pf, unsigned int vf,
4923 unsigned int nmac, u8 *mac, u16 *rss_size,
4924 unsigned int portfunc, unsigned int idstype)
4929 memset(&c, 0, sizeof(c));
4930 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4931 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4932 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4933 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4934 c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4935 V_FW_VI_CMD_FUNC(portfunc));
4936 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4939 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4944 memcpy(mac, c.mac, sizeof(c.mac));
4947 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4949 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4951 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4953 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4957 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4958 return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
4962 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4963 * @adap: the adapter
4964 * @mbox: mailbox to use for the FW command
4965 * @port: physical port associated with the VI
4966 * @pf: the PF owning the VI
4967 * @vf: the VF owning the VI
4968 * @nmac: number of MAC addresses needed (1 to 5)
4969 * @mac: the MAC addresses of the VI
4970 * @rss_size: size of RSS table slice associated with this VI
4972 * backwards compatible and convieniance routine to allocate a Virtual
4973 * Interface with a Ethernet Port Application Function and Intrustion
4974 * Detection System disabled.
4976 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4977 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4980 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4985 * t4_free_vi - free a virtual interface
4986 * @adap: the adapter
4987 * @mbox: mailbox to use for the FW command
4988 * @pf: the PF owning the VI
4989 * @vf: the VF owning the VI
4990 * @viid: virtual interface identifiler
4992 * Free a previously allocated virtual interface.
4994 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4995 unsigned int vf, unsigned int viid)
4999 memset(&c, 0, sizeof(c));
5000 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
5003 V_FW_VI_CMD_PFN(pf) |
5004 V_FW_VI_CMD_VFN(vf));
5005 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
5006 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
5008 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5012 * t4_set_rxmode - set Rx properties of a virtual interface
5013 * @adap: the adapter
5014 * @mbox: mailbox to use for the FW command
5016 * @mtu: the new MTU or -1
5017 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5018 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5019 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5020 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
5021 * @sleep_ok: if true we may sleep while awaiting command completion
5023 * Sets Rx properties of a virtual interface.
5025 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5026 int mtu, int promisc, int all_multi, int bcast, int vlanex,
5029 struct fw_vi_rxmode_cmd c;
5031 /* convert to FW values */
5033 mtu = M_FW_VI_RXMODE_CMD_MTU;
5035 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
5037 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
5039 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
5041 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
5043 memset(&c, 0, sizeof(c));
5044 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
5045 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
5046 c.retval_len16 = htonl(FW_LEN16(c));
5047 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
5048 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
5049 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
5050 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
5051 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
5052 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5056 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5057 * @adap: the adapter
5058 * @mbox: mailbox to use for the FW command
5060 * @free: if true any existing filters for this VI id are first removed
5061 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
5062 * @addr: the MAC address(es)
5063 * @idx: where to store the index of each allocated filter
5064 * @hash: pointer to hash address filter bitmap
5065 * @sleep_ok: call is allowed to sleep
5067 * Allocates an exact-match filter for each of the supplied addresses and
5068 * sets it to the corresponding address. If @idx is not %NULL it should
5069 * have at least @naddr entries, each of which will be set to the index of
5070 * the filter allocated for the corresponding MAC address. If a filter
5071 * could not be allocated for an address its index is set to 0xffff.
5072 * If @hash is not %NULL addresses that fail to allocate an exact filter
5073 * are hashed and update the hash filter bitmap pointed at by @hash.
5075 * Returns a negative error number or the number of filters allocated.
5077 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5078 unsigned int viid, bool free, unsigned int naddr,
5079 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5081 int offset, ret = 0;
5082 struct fw_vi_mac_cmd c;
5083 unsigned int nfilters = 0;
5084 unsigned int max_naddr = is_t4(adap) ?
5085 NUM_MPS_CLS_SRAM_L_INSTANCES :
5086 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5087 unsigned int rem = naddr;
5089 if (naddr > max_naddr)
5092 for (offset = 0; offset < naddr ; /**/) {
5093 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5095 : ARRAY_SIZE(c.u.exact));
5096 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5097 u.exact[fw_naddr]), 16);
5098 struct fw_vi_mac_exact *p;
5101 memset(&c, 0, sizeof(c));
5102 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5105 V_FW_CMD_EXEC(free) |
5106 V_FW_VI_MAC_CMD_VIID(viid));
5107 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5108 V_FW_CMD_LEN16(len16));
5110 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5111 p->valid_to_idx = htons(
5112 F_FW_VI_MAC_CMD_VALID |
5113 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5114 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5118 * It's okay if we run out of space in our MAC address arena.
5119 * Some of the addresses we submit may get stored so we need
5120 * to run through the reply to see what the results were ...
5122 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5123 if (ret && ret != -FW_ENOMEM)
5126 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5127 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5130 idx[offset+i] = (index >= max_naddr
5133 if (index < max_naddr)
5136 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5144 if (ret == 0 || ret == -FW_ENOMEM)
5150 * t4_change_mac - modifies the exact-match filter for a MAC address
5151 * @adap: the adapter
5152 * @mbox: mailbox to use for the FW command
5154 * @idx: index of existing filter for old value of MAC address, or -1
5155 * @addr: the new MAC address value
5156 * @persist: whether a new MAC allocation should be persistent
5157 * @add_smt: if true also add the address to the HW SMT
5159 * Modifies an exact-match filter and sets it to the new MAC address if
5160 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
5161 * latter case the address is added persistently if @persist is %true.
5163 * Note that in general it is not possible to modify the value of a given
5164 * filter so the generic way to modify an address filter is to free the one
5165 * being used by the old address value and allocate a new filter for the
5166 * new address value.
5168 * Returns a negative error number or the index of the filter with the new
5169 * MAC value. Note that this index may differ from @idx.
5171 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5172 int idx, const u8 *addr, bool persist, bool add_smt)
5175 struct fw_vi_mac_cmd c;
5176 struct fw_vi_mac_exact *p = c.u.exact;
5177 unsigned int max_mac_addr = is_t4(adap) ?
5178 NUM_MPS_CLS_SRAM_L_INSTANCES :
5179 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5181 if (idx < 0) /* new allocation */
5182 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5183 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5185 memset(&c, 0, sizeof(c));
5186 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5187 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5188 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5189 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5190 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5191 V_FW_VI_MAC_CMD_IDX(idx));
5192 memcpy(p->macaddr, addr, sizeof(p->macaddr));
5194 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5196 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5197 if (ret >= max_mac_addr)
5204 * t4_set_addr_hash - program the MAC inexact-match hash filter
5205 * @adap: the adapter
5206 * @mbox: mailbox to use for the FW command
5208 * @ucast: whether the hash filter should also match unicast addresses
5209 * @vec: the value to be written to the hash filter
5210 * @sleep_ok: call is allowed to sleep
5212 * Sets the 64-bit inexact-match hash filter for a virtual interface.
5214 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5215 bool ucast, u64 vec, bool sleep_ok)
5217 struct fw_vi_mac_cmd c;
5219 memset(&c, 0, sizeof(c));
5220 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5221 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
5222 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5223 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5225 c.u.hash.hashvec = cpu_to_be64(vec);
5226 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5230 * t4_enable_vi - enable/disable a virtual interface
5231 * @adap: the adapter
5232 * @mbox: mailbox to use for the FW command
5234 * @rx_en: 1=enable Rx, 0=disable Rx
5235 * @tx_en: 1=enable Tx, 0=disable Tx
5237 * Enables/disables a virtual interface.
5239 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5240 bool rx_en, bool tx_en)
5242 struct fw_vi_enable_cmd c;
5244 memset(&c, 0, sizeof(c));
5245 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5246 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5247 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5248 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5249 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5253 * t4_identify_port - identify a VI's port by blinking its LED
5254 * @adap: the adapter
5255 * @mbox: mailbox to use for the FW command
5257 * @nblinks: how many times to blink LED at 2.5 Hz
5259 * Identifies a VI's port by blinking its LED.
5261 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5262 unsigned int nblinks)
5264 struct fw_vi_enable_cmd c;
5266 memset(&c, 0, sizeof(c));
5267 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5268 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5269 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5270 c.blinkdur = htons(nblinks);
5271 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5275 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
5276 * @adap: the adapter
5277 * @mbox: mailbox to use for the FW command
5278 * @start: %true to enable the queues, %false to disable them
5279 * @pf: the PF owning the queues
5280 * @vf: the VF owning the queues
5281 * @iqid: ingress queue id
5282 * @fl0id: FL0 queue id or 0xffff if no attached FL0
5283 * @fl1id: FL1 queue id or 0xffff if no attached FL1
5285 * Starts or stops an ingress queue and its associated FLs, if any.
5287 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5288 unsigned int pf, unsigned int vf, unsigned int iqid,
5289 unsigned int fl0id, unsigned int fl1id)
5293 memset(&c, 0, sizeof(c));
5294 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5295 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5296 V_FW_IQ_CMD_VFN(vf));
5297 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5298 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5299 c.iqid = htons(iqid);
5300 c.fl0id = htons(fl0id);
5301 c.fl1id = htons(fl1id);
5302 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5306 * t4_iq_free - free an ingress queue and its FLs
5307 * @adap: the adapter
5308 * @mbox: mailbox to use for the FW command
5309 * @pf: the PF owning the queues
5310 * @vf: the VF owning the queues
5311 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5312 * @iqid: ingress queue id
5313 * @fl0id: FL0 queue id or 0xffff if no attached FL0
5314 * @fl1id: FL1 queue id or 0xffff if no attached FL1
5316 * Frees an ingress queue and its associated FLs, if any.
5318 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5319 unsigned int vf, unsigned int iqtype, unsigned int iqid,
5320 unsigned int fl0id, unsigned int fl1id)
5324 memset(&c, 0, sizeof(c));
5325 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5326 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5327 V_FW_IQ_CMD_VFN(vf));
5328 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5329 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5330 c.iqid = htons(iqid);
5331 c.fl0id = htons(fl0id);
5332 c.fl1id = htons(fl1id);
5333 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5337 * t4_eth_eq_free - free an Ethernet egress queue
5338 * @adap: the adapter
5339 * @mbox: mailbox to use for the FW command
5340 * @pf: the PF owning the queue
5341 * @vf: the VF owning the queue
5342 * @eqid: egress queue id
5344 * Frees an Ethernet egress queue.
5346 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5347 unsigned int vf, unsigned int eqid)
5349 struct fw_eq_eth_cmd c;
5351 memset(&c, 0, sizeof(c));
5352 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5353 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5354 V_FW_EQ_ETH_CMD_VFN(vf));
5355 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5356 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5357 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5361 * t4_ctrl_eq_free - free a control egress queue
5362 * @adap: the adapter
5363 * @mbox: mailbox to use for the FW command
5364 * @pf: the PF owning the queue
5365 * @vf: the VF owning the queue
5366 * @eqid: egress queue id
5368 * Frees a control egress queue.
5370 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5371 unsigned int vf, unsigned int eqid)
5373 struct fw_eq_ctrl_cmd c;
5375 memset(&c, 0, sizeof(c));
5376 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5377 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5378 V_FW_EQ_CTRL_CMD_VFN(vf));
5379 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5380 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5381 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5385 * t4_ofld_eq_free - free an offload egress queue
5386 * @adap: the adapter
5387 * @mbox: mailbox to use for the FW command
5388 * @pf: the PF owning the queue
5389 * @vf: the VF owning the queue
5390 * @eqid: egress queue id
5392 * Frees a control egress queue.
5394 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5395 unsigned int vf, unsigned int eqid)
5397 struct fw_eq_ofld_cmd c;
5399 memset(&c, 0, sizeof(c));
5400 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5401 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5402 V_FW_EQ_OFLD_CMD_VFN(vf));
5403 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5404 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5405 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5409 * t4_handle_fw_rpl - process a FW reply message
5410 * @adap: the adapter
5411 * @rpl: start of the FW message
5413 * Processes a FW message, such as link state change messages.
/*
 * Decodes a FW reply message. Only FW_PORT_CMD/GET_PORT_INFO (link or module
 * state change) is handled here; anything else is logged as unknown.
 * NOTE(review): several source lines (pause/speed assignment bodies, loop and
 * if braces, the final return) are elided from this listing; comments below
 * describe only the visible code.
 */
5415 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* First byte of any FW message is the opcode; reinterpret as a port command. */
5417 u8 opcode = *(const u8 *)rpl;
5418 const struct fw_port_cmd *p = (const void *)rpl;
5419 unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5421 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5422 /* link/module state change message */
5423 int speed = 0, fc = 0, i;
5424 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5425 struct port_info *pi = NULL;
5426 struct link_config *lc;
/* Unpack link status word: link-up bit, module type, pause and speed bits. */
5427 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5428 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5429 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* Pause flags (assignment bodies elided in this listing). */
5431 if (stat & F_FW_PORT_CMD_RXPAUSE)
5433 if (stat & F_FW_PORT_CMD_TXPAUSE)
/* Speed decode: first matching capability bit wins (100M/1G bodies elided). */
5435 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5437 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5439 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5440 speed = SPEED_10000;
5441 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5442 speed = SPEED_40000;
/* Find the port whose TX channel matches the message's port id. */
5444 for_each_port(adap, i) {
5445 pi = adap2pinfo(adap, i);
5446 if (pi->tx_chan == chan)
/* Notify the OS layer when the transceiver module type changed. */
5451 if (mod != pi->mod_type) {
5453 t4_os_portmod_changed(adap, i);
5455 if (link_ok != lc->link_ok || speed != lc->speed ||
5456 fc != lc->fc) { /* something changed */
/* Capture the firmware's link-down reason only on an up->down transition. */
5459 if (!link_ok && lc->link_ok)
5460 reason = G_FW_PORT_CMD_LINKDNRC(stat);
5464 lc->link_ok = link_ok;
5467 lc->supported = ntohs(p->u.info.pcap);
5468 t4_os_link_changed(adap, i, link_ok, reason);
/* Not a port-info message: rate-limited warning with opcode and action. */
5471 CH_WARN_RATELIMIT(adap,
5472 "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5479 * get_pci_mode - determine a card's PCI mode
5480 * @adapter: the adapter
5481 * @p: where to store the PCI settings
5483 * Determines a card's PCI mode and associated parameters, such as speed
/*
 * Reads the PCIe Link Status register and records the negotiated link
 * speed and width into @p. Local declarations and the capability-found
 * check are elided from this listing.
 */
5486 static void __devinit get_pci_mode(struct adapter *adapter,
5487 struct pci_params *p)
5492 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5494 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
/* LNKSTA: Current Link Speed in the low bits, Negotiated Link Width at bit 4. */
5495 p->speed = val & PCI_EXP_LNKSTA_CLS;
5496 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5501 * init_link_config - initialize a link's SW state
5502 * @lc: structure holding the link state
5503 * @caps: link capabilities
5505 * Initializes the SW state maintained for each link, including the link's
5506 * capabilities and default speed/flow-control/autonegotiation settings.
/*
 * Seeds a link_config from the firmware-reported port capabilities:
 * flow control defaults to RX+TX pause, and autonegotiation is enabled
 * (with the advertised set masked to ADVERT_MASK) only when the port
 * capabilities include ANEG. The @caps parameter line and else-branch
 * braces are elided from this listing.
 */
5508 static void __devinit init_link_config(struct link_config *lc,
5511 lc->supported = caps;
5512 lc->requested_speed = 0;
5514 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5515 if (lc->supported & FW_PORT_CAP_ANEG) {
5516 lc->advertising = lc->supported & ADVERT_MASK;
5517 lc->autoneg = AUTONEG_ENABLE;
5518 lc->requested_fc |= PAUSE_AUTONEG;
/* No ANEG capability: advertise nothing and force autoneg off. */
5520 lc->advertising = 0;
5521 lc->autoneg = AUTONEG_DISABLE;
/*
 * Identifies the serial flash via the SF_RD_ID command and derives the
 * part's size and sector count into adapter->params. Error-return paths,
 * declarations and braces are elided from this listing.
 */
5525 static int __devinit get_flash_params(struct adapter *adapter)
/* Issue READ ID, read back 3 id bytes, then release the SF interface lock. */
5530 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5532 ret = sf1_read(adapter, 3, 0, 1, &info);
5533 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Manufacturer id 0x20 = Numonix/Micron; anything else is unsupported here. */
5537 if ((info & 0xff) != 0x20) /* not a Numonix flash */
5539 info >>= 16; /* log2 of size */
/* 1MB..8MB parts use 64KB sectors (size/2^16 of them); 16MB parts have 64. */
5540 if (info >= 0x14 && info < 0x18)
5541 adapter->params.sf_nsec = 1 << (info - 16);
5542 else if (info == 0x18)
5543 adapter->params.sf_nsec = 64;
5546 adapter->params.sf_size = 1 << info;
/*
 * Read-modify-writes the PCIe Device Control 2 register to program the
 * completion timeout range. The @range parameter line, the mask/merge of
 * the timeout field, and the capability-found check are elided from this
 * listing.
 */
5550 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5556 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5558 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5561 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5566 * t4_prep_adapter - prepare SW and HW for operation
5567 * @adapter: the adapter
5568 * @reset: if true perform a HW reset
5570 * Initialize adapter SW state for the various HW modules, set initial
5571 * values for some adapter tunables, take PHYs out of reset, and
5572 * initialize the MDIO interface.
/*
 * One-time SW/HW prep: detect chip id/revision, read flash and VPD
 * parameters, detect FPGA vs. ASIC, seed congestion-control tables, and
 * install conservative defaults so the driver can limp along even if the
 * firmware is unreachable. Error-return checks, declarations and braces
 * are elided from this listing.
 */
5574 int __devinit t4_prep_adapter(struct adapter *adapter)
5580 get_pci_mode(adapter, &adapter->params.pci);
/* PL_REV carries both the chip family (T5+) and the silicon revision. */
5582 pl_rev = t4_read_reg(adapter, A_PL_REV);
5583 adapter->params.chipid = G_CHIPID(pl_rev);
5584 adapter->params.rev = G_REV(pl_rev);
5585 if (adapter->params.chipid == 0) {
5586 /* T4 did not have chipid in PL_REV (T5 onwards do) */
5587 adapter->params.chipid = CHELSIO_T4;
5589 /* T4A1 chip is not supported */
5590 if (adapter->params.rev == 1) {
5591 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5595 adapter->params.pci.vpd_cap_addr =
5596 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
/* Both calls return error codes; the checks are elided in this listing. */
5598 ret = get_flash_params(adapter);
5602 ret = get_vpd_params(adapter, &adapter->params.vpd);
5606 /* Cards with real ASICs have the chipid in the PCIe device id */
5607 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5608 if (device_id >> 12 == adapter->params.chipid)
5609 adapter->params.cim_la_size = CIMLA_SIZE;
/* Device-id mismatch means an FPGA emulation part with a larger CIM LA. */
5612 adapter->params.fpga = 1;
5613 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5616 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5619 * Default port and clock for debugging in case we can't reach FW.
5621 adapter->params.nports = 1;
5622 adapter->params.portvec = 1;
5623 adapter->params.vpd.cclk = 50000;
5625 /* Set pci completion timeout value to 4 seconds. */
5626 set_pcie_completion_timeout(adapter, 0xd);
5631 * t4_init_tp_params - initialize adap->params.tp
5632 * @adap: the adapter
5634 * Initialize various fields of the adapter's TP Parameters structure.
/*
 * Caches TP block parameters in adap->params.tp: timer and delayed-ACK
 * resolutions, the default TX modulation queue map, and the ingress
 * config (read indirectly through TP_PIO), then refreshes the cached
 * filter mode. Declarations and the return are elided from this listing.
 */
5636 int __devinit t4_init_tp_params(struct adapter *adap)
5641 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5642 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5643 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5645 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5646 for (chan = 0; chan < NCHAN; chan++)
5647 adap->params.tp.tx_modq[chan] = chan;
/* TP_INGRESS_CONFIG is behind the TP PIO indirection window. */
5649 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5650 &adap->params.tp.ingress_config, 1,
5651 A_TP_INGRESS_CONFIG);
5652 refresh_vlan_pri_map(adap);
5658 * t4_filter_field_shift - calculate filter field shift
5659 * @adap: the adapter
5660 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5662 * Return the shift position of a filter field within the Compressed
5663 * Filter Tuple. The filter field is specified via its selection bit
5664 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
/*
 * Computes the bit offset of @filter_sel within the Compressed Filter
 * Tuple by summing the widths of every enabled field that precedes it in
 * the configured filter mode. Declarations, the negative-result return
 * for a disabled field, and the final return are elided from this listing.
 */
5666 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5668 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field not present in the filter mode has no position in the tuple. */
5672 if ((filter_mode & filter_sel) == 0)
/* Walk selection bits below @filter_sel, accumulating enabled field widths. */
5675 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5676 switch (filter_mode & sel) {
5677 case F_FCOE: field_shift += W_FT_FCOE; break;
5678 case F_PORT: field_shift += W_FT_PORT; break;
5679 case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break;
5680 case F_VLAN: field_shift += W_FT_VLAN; break;
5681 case F_TOS: field_shift += W_FT_TOS; break;
5682 case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break;
5683 case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break;
5684 case F_MACMATCH: field_shift += W_FT_MACMATCH; break;
5685 case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break;
5686 case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
/*
 * Per-port init: queries the firmware for the port's info, allocates a
 * virtual interface, records the MAC address, MDIO address, port/module
 * type and RSS geometry on @p, and seeds the port's link configuration.
 * Fix: line 5741 contained the mis-encoded token "¶m" (an HTML-entity
 * mangling of "&param") which cannot compile; restored to "&param".
 * Declarations, error checks and braces are elided from this listing.
 */
5692 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5696 struct fw_port_cmd c;
5698 adapter_t *adap = p->adapter;
5701 memset(&c, 0, sizeof(c));
/* Map the logical port_id to the j-th set bit of the FW port vector. */
5703 for (i = 0, j = -1; i <= p->port_id; i++) {
5706 } while ((adap->params.portvec & (1 << j)) == 0);
/* Read-only GET_PORT_INFO query; the reply overwrites c in place. */
5709 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5710 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5711 V_FW_PORT_CMD_PORTID(j));
5712 c.action_to_len16 = htonl(
5713 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5715 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5719 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5725 p->rx_chan_map = get_mps_bg_map(adap, j);
5727 p->rss_size = rss_size;
5728 t4_os_set_hw_addr(adap, p->port_id, addr);
/* Unpack MDIO capability/address and port/module type from the reply. */
5730 ret = ntohl(c.u.info.lstatus_to_modtype);
5731 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5732 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5733 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5734 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5736 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
/* Ask FW for this VI's RSS table base; 0xffff marks "unknown" on failure. */
5738 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5739 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
5740 V_FW_PARAMS_PARAM_YZ(p->viid);
5741 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
5743 p->rss_base = 0xffff;
5745 /* MPASS((val >> 16) == rss_size); */
5746 p->rss_base = val & 0xffff;
/*
 * Sends a FW_SCHED_CMD carrying the CONFIG sub-command to program TX
 * scheduler configuration (type and min/max rate enable). The flag bits
 * OR'd into op_to_write and the tail of the mailbox call are elided from
 * this listing.
 */
5752 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
5755 struct fw_sched_cmd cmd;
5757 memset(&cmd, 0, sizeof(cmd));
5758 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5761 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* CONFIG sub-command payload lives in the u.config arm of the union. */
5763 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5764 cmd.u.config.type = type;
5765 cmd.u.config.minmaxen = minmaxen;
5767 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
5771 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5772 int rateunit, int ratemode, int channel, int cl,
5773 int minrate, int maxrate, int weight, int pktsize,
5776 struct fw_sched_cmd cmd;
5778 memset(&cmd, 0, sizeof(cmd));
5779 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5782 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5784 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5785 cmd.u.params.type = type;
5786 cmd.u.params.level = level;
5787 cmd.u.params.mode = mode;
5788 cmd.u.params.ch = channel;
5789 cmd.u.params.cl = cl;
5790 cmd.u.params.unit = rateunit;
5791 cmd.u.params.rate = ratemode;
5792 cmd.u.params.min = cpu_to_be32(minrate);
5793 cmd.u.params.max = cpu_to_be32(maxrate);
5794 cmd.u.params.weight = cpu_to_be16(weight);
5795 cmd.u.params.pktsize = cpu_to_be16(pktsize);
5797 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),