2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
66 if (!!(val & mask) == polarity) {
78 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
79 int polarity, int attempts, int delay)
81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
86 * t4_set_reg_field - set a register field to a value
87 * @adapter: the adapter to program
88 * @addr: the register address
89 * @mask: specifies the portion of the register to modify
90 * @val: the new value for the register field
92 * Sets a register field specified by the supplied mask to the
95 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
98 u32 v = t4_read_reg(adapter, addr) & ~mask;
100 t4_write_reg(adapter, addr, v | val);
101 (void) t4_read_reg(adapter, addr); /* flush */
105 * t4_read_indirect - read indirectly addressed registers
107 * @addr_reg: register holding the indirect address
108 * @data_reg: register holding the value of the indirect register
109 * @vals: where the read register values are stored
110 * @nregs: how many indirect registers to read
111 * @start_idx: index of first indirect register to read
113 * Reads registers that are accessed indirectly through an address/data
116 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117 unsigned int data_reg, u32 *vals,
118 unsigned int nregs, unsigned int start_idx)
121 t4_write_reg(adap, addr_reg, start_idx);
122 *vals++ = t4_read_reg(adap, data_reg);
128 * t4_write_indirect - write indirectly addressed registers
130 * @addr_reg: register holding the indirect addresses
131 * @data_reg: register holding the value for the indirect registers
132 * @vals: values to write
133 * @nregs: how many indirect registers to write
134 * @start_idx: address of first indirect register to write
136 * Writes a sequential block of registers that are accessed indirectly
137 * through an address/data register pair.
139 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140 unsigned int data_reg, const u32 *vals,
141 unsigned int nregs, unsigned int start_idx)
144 t4_write_reg(adap, addr_reg, start_idx++);
145 t4_write_reg(adap, data_reg, *vals++);
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism. This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 * the backdoor registers on a regular basis and we can end up
157 * conflicting with it's uses!
159 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
161 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
164 if (chip_id(adap) <= CHELSIO_T5)
172 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
173 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
176 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
177 * Configuration Space read. (None of the other fields matter when
178 * F_ENABLE is 0 so a simple register write is easier than a
179 * read-modify-write via t4_set_reg_field().)
181 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 * t4_report_fw_error - report firmware error
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
194 static void t4_report_fw_error(struct adapter *adap)
196 static const char *const reason[] = {
197 "Crash", /* PCIE_FW_EVAL_CRASH */
198 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
199 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
200 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
201 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
203 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
204 "Reserved", /* reserved */
208 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209 if (pcie_fw & F_PCIE_FW_ERR)
210 CH_ERR(adap, "Firmware reports adapter error: %s\n",
211 reason[G_PCIE_FW_EVAL(pcie_fw)]);
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
217 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
220 for ( ; nflit; nflit--, mbox_addr += 8)
221 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
225 * Handle a FW assertion reported in a mailbox.
227 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
230 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231 asrt->u.assert.filename_0_7,
232 be32_to_cpu(asrt->u.assert.line),
233 be32_to_cpu(asrt->u.assert.x),
234 be32_to_cpu(asrt->u.assert.y));
237 #define X_CIM_PF_NOACCESS 0xeeeeeeee
239 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
241 * @mbox: index of the mailbox to use
242 * @cmd: the command to write
243 * @size: command length in bytes
244 * @rpl: where to optionally store the reply
245 * @sleep_ok: if true we may sleep while awaiting command completion
246 * @timeout: time to wait for command to finish before timing out
247 * (negative implies @sleep_ok=false)
249 * Sends the given command to FW through the selected mailbox and waits
250 * for the FW to execute the command. If @rpl is not %NULL it is used to
251 * store the FW's reply to the command. The command and its optional
252 * reply are of the same length. Some FW commands like RESET and
253 * INITIALIZE can take a considerable amount of time to execute.
254 * @sleep_ok determines whether we may sleep while awaiting the response.
255 * If sleeping is allowed we use progressive backoff otherwise we spin.
256 * Note that passing in a negative @timeout is an alternate mechanism
257 * for specifying @sleep_ok=false. This is useful when a higher level
258 * interface allows for specification of @timeout but not @sleep_ok ...
260 * The return value is 0 on success or a negative errno on failure. A
261 * failure can happen either because we are not able to execute the
262 * command or FW executes it but signals an error. In the latter case
263 * the return value is the error code indicated by FW (negated).
265 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
266 int size, void *rpl, bool sleep_ok, int timeout)
269 * We delay in small increments at first in an effort to maintain
270 * responsiveness for simple, fast executing commands but then back
271 * off to larger delays to a maximum retry delay.
273 static const int delay[] = {
274 1, 1, 3, 5, 10, 10, 20, 50, 100
278 int i, ms, delay_idx, ret;
279 const __be64 *p = cmd;
280 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
281 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
283 __be64 cmd_rpl[MBOX_LEN/8];
286 if ((size & 15) || size > MBOX_LEN)
290 * If we have a negative timeout, that implies that we can't sleep.
298 * Attempt to gain access to the mailbox.
300 for (i = 0; i < 4; i++) {
301 ctl = t4_read_reg(adap, ctl_reg);
303 if (v != X_MBOWNER_NONE)
308 * If we were unable to gain access, dequeue ourselves from the
309 * mailbox atomic access list and report the error to our caller.
311 if (v != X_MBOWNER_PL) {
312 t4_report_fw_error(adap);
313 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
318 * If we gain ownership of the mailbox and there's a "valid" message
319 * in it, this is likely an asynchronous error message from the
320 * firmware. So we'll report that and then proceed on with attempting
321 * to issue our own command ... which may well fail if the error
322 * presaged the firmware crashing ...
324 if (ctl & F_MBMSGVALID) {
325 CH_ERR(adap, "found VALID command in mbox %u: "
326 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
327 (unsigned long long)t4_read_reg64(adap, data_reg),
328 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
329 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
330 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
331 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
332 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
333 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
334 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
338 * Copy in the new mailbox command and send it on its way ...
340 for (i = 0; i < size; i += 8, p++)
341 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
343 CH_DUMP_MBOX(adap, mbox, data_reg);
345 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
346 t4_read_reg(adap, ctl_reg); /* flush write */
352 * Loop waiting for the reply; bail out if we time out or the firmware
356 !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
360 ms = delay[delay_idx]; /* last element may repeat */
361 if (delay_idx < ARRAY_SIZE(delay) - 1)
368 v = t4_read_reg(adap, ctl_reg);
369 if (v == X_CIM_PF_NOACCESS)
371 if (G_MBOWNER(v) == X_MBOWNER_PL) {
372 if (!(v & F_MBMSGVALID)) {
373 t4_write_reg(adap, ctl_reg,
374 V_MBOWNER(X_MBOWNER_NONE));
379 * Retrieve the command reply and release the mailbox.
381 get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
382 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
384 CH_DUMP_MBOX(adap, mbox, data_reg);
386 res = be64_to_cpu(cmd_rpl[0]);
387 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
388 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
389 res = V_FW_CMD_RETVAL(EIO);
391 memcpy(rpl, cmd_rpl, size);
392 return -G_FW_CMD_RETVAL((int)res);
397 * We timed out waiting for a reply to our mailbox command. Report
398 * the error and also check to see if the firmware reported any
401 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
402 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
403 *(const u8 *)cmd, mbox);
405 t4_report_fw_error(adap);
410 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
411 void *rpl, bool sleep_ok)
413 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
414 sleep_ok, FW_CMD_MAX_TIMEOUT);
418 static int t4_edc_err_read(struct adapter *adap, int idx)
420 u32 edc_ecc_err_addr_reg;
421 u32 edc_bist_status_rdata_reg;
424 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
427 if (idx != 0 && idx != 1) {
428 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
432 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
433 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
436 "edc%d err addr 0x%x: 0x%x.\n",
437 idx, edc_ecc_err_addr_reg,
438 t4_read_reg(adap, edc_ecc_err_addr_reg));
440 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
441 edc_bist_status_rdata_reg,
442 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
443 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
444 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
445 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
446 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
447 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
448 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
449 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
450 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
456 * t4_mc_read - read from MC through backdoor accesses
458 * @idx: which MC to access
459 * @addr: address of first byte requested
460 * @data: 64 bytes of data containing the requested address
461 * @ecc: where to store the corresponding 64-bit ECC word
463 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
464 * that covers the requested address @addr. If @parity is not %NULL it
465 * is assigned the 64-bit ECC word for the read data.
467 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
470 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
471 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
474 mc_bist_cmd_reg = A_MC_BIST_CMD;
475 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
476 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
477 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
478 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
480 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
481 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
482 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
483 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
485 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
489 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
491 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
492 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
493 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
494 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
495 F_START_BIST | V_BIST_CMD_GAP(1));
496 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
500 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
502 for (i = 15; i >= 0; i--)
503 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
505 *ecc = t4_read_reg64(adap, MC_DATA(16));
511 * t4_edc_read - read from EDC through backdoor accesses
513 * @idx: which EDC to access
514 * @addr: address of first byte requested
515 * @data: 64 bytes of data containing the requested address
516 * @ecc: where to store the corresponding 64-bit ECC word
518 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
519 * that covers the requested address @addr. If @parity is not %NULL it
520 * is assigned the 64-bit ECC word for the read data.
522 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
525 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
526 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
529 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
530 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
531 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
532 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
534 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
538 * These macro are missing in t4_regs.h file.
539 * Added temporarily for testing.
541 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
542 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
543 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
544 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
545 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
546 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
548 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
554 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
556 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
557 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
558 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
559 t4_write_reg(adap, edc_bist_cmd_reg,
560 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
561 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
565 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
567 for (i = 15; i >= 0; i--)
568 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
570 *ecc = t4_read_reg64(adap, EDC_DATA(16));
576 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
578 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
579 * @addr: address within indicated memory type
580 * @len: amount of memory to read
581 * @buf: host memory buffer
583 * Reads an [almost] arbitrary memory region in the firmware: the
584 * firmware memory address, length and host buffer must be aligned on
585 * 32-bit boudaries. The memory is returned as a raw byte sequence from
586 * the firmware's memory. If this memory contains data structures which
587 * contain multi-byte integers, it's the callers responsibility to
588 * perform appropriate byte order conversions.
590 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
593 u32 pos, start, end, offset;
597 * Argument sanity checks ...
599 if ((addr & 0x3) || (len & 0x3))
603 * The underlaying EDC/MC read routines read 64 bytes at a time so we
604 * need to round down the start and round up the end. We'll start
605 * copying out of the first line at (addr - start) a word at a time.
607 start = addr & ~(64-1);
608 end = (addr + len + 64-1) & ~(64-1);
609 offset = (addr - start)/sizeof(__be32);
611 for (pos = start; pos < end; pos += 64, offset = 0) {
615 * Read the chip's memory block and bail if there's an error.
617 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
618 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
620 ret = t4_edc_read(adap, mtype, pos, data, NULL);
625 * Copy the data into the caller's memory buffer.
627 while (offset < 16 && len > 0) {
628 *buf++ = data[offset++];
629 len -= sizeof(__be32);
637 * Return the specified PCI-E Configuration Space register from our Physical
638 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
639 * since we prefer to let the firmware own all of these registers, but if that
640 * fails we go for it directly ourselves.
642 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
646 * If fw_attach != 0, construct and send the Firmware LDST Command to
647 * retrieve the specified PCI-E Configuration Space register.
649 if (drv_fw_attach != 0) {
650 struct fw_ldst_cmd ldst_cmd;
653 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
654 ldst_cmd.op_to_addrspace =
655 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
658 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
659 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
660 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
661 ldst_cmd.u.pcie.ctrl_to_fn =
662 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
663 ldst_cmd.u.pcie.r = reg;
666 * If the LDST Command succeeds, return the result, otherwise
667 * fall through to reading it directly ourselves ...
669 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
672 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
674 CH_WARN(adap, "Firmware failed to return "
675 "Configuration Space register %d, err = %d\n",
680 * Read the desired Configuration Space register via the PCI-E
681 * Backdoor mechanism.
683 return t4_hw_pci_read_cfg4(adap, reg);
687 * t4_get_regs_len - return the size of the chips register set
688 * @adapter: the adapter
690 * Returns the size of the chip's BAR0 register space.
692 unsigned int t4_get_regs_len(struct adapter *adapter)
694 unsigned int chip_version = chip_id(adapter);
696 switch (chip_version) {
698 return T4_REGMAP_SIZE;
702 return T5_REGMAP_SIZE;
706 "Unsupported chip version %d\n", chip_version);
711 * t4_get_regs - read chip registers into provided buffer
713 * @buf: register buffer
714 * @buf_size: size (in bytes) of register buffer
716 * If the provided register buffer isn't large enough for the chip's
717 * full register range, the register dump will be truncated to the
718 * register buffer's size.
720 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
722 static const unsigned int t4_reg_ranges[] = {
1180 static const unsigned int t5_reg_ranges[] = {
1955 static const unsigned int t6_reg_ranges[] = {
2532 u32 *buf_end = (u32 *)(buf + buf_size);
2533 const unsigned int *reg_ranges;
2534 int reg_ranges_size, range;
2535 unsigned int chip_version = chip_id(adap);
2538 * Select the right set of register ranges to dump depending on the
2539 * adapter chip type.
2541 switch (chip_version) {
2543 reg_ranges = t4_reg_ranges;
2544 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2548 reg_ranges = t5_reg_ranges;
2549 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2553 reg_ranges = t6_reg_ranges;
2554 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2559 "Unsupported chip version %d\n", chip_version);
2564 * Clear the register buffer and insert the appropriate register
2565 * values selected by the above register ranges.
2567 memset(buf, 0, buf_size);
2568 for (range = 0; range < reg_ranges_size; range += 2) {
2569 unsigned int reg = reg_ranges[range];
2570 unsigned int last_reg = reg_ranges[range + 1];
2571 u32 *bufp = (u32 *)(buf + reg);
2574 * Iterate across the register range filling in the register
2575 * buffer but don't write past the end of the register buffer.
2577 while (reg <= last_reg && bufp < buf_end) {
2578 *bufp++ = t4_read_reg(adap, reg);
2585 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2597 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2599 #define EEPROM_DELAY 10 /* 10us per poll spin */
2600 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2602 #define EEPROM_STAT_ADDR 0x7bfc
2603 #define VPD_BASE 0x400
2604 #define VPD_BASE_OLD 0
2605 #define VPD_LEN 1024
2606 #define VPD_INFO_FLD_HDR_SIZE 3
2607 #define CHELSIO_VPD_UNIQUE_ID 0x82
2610 * Small utility function to wait till any outstanding VPD Access is complete.
2611 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2612 * VPD Access in flight. This allows us to handle the problem of having a
2613 * previous VPD Access time out and prevent an attempt to inject a new VPD
2614 * Request before any in-flight VPD reguest has completed.
2616 static int t4_seeprom_wait(struct adapter *adapter)
2618 unsigned int base = adapter->params.pci.vpd_cap_addr;
2622 * If no VPD Access is in flight, we can just return success right
2625 if (!adapter->vpd_busy)
2629 * Poll the VPD Capability Address/Flag register waiting for it
2630 * to indicate that the operation is complete.
2632 max_poll = EEPROM_MAX_POLL;
2636 udelay(EEPROM_DELAY);
2637 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2640 * If the operation is complete, mark the VPD as no longer
2641 * busy and return success.
2643 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2644 adapter->vpd_busy = 0;
2647 } while (--max_poll);
2650 * Failure! Note that we leave the VPD Busy status set in order to
2651 * avoid pushing a new VPD Access request into the VPD Capability till
2652 * the current operation eventually succeeds. It's a bug to issue a
2653 * new request when an existing request is in flight and will result
2654 * in corrupt hardware state.
2660 * t4_seeprom_read - read a serial EEPROM location
2661 * @adapter: adapter to read
2662 * @addr: EEPROM virtual address
2663 * @data: where to store the read data
2665 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2666 * VPD capability. Note that this function must be called with a virtual
2669 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2671 unsigned int base = adapter->params.pci.vpd_cap_addr;
2675 * VPD Accesses must alway be 4-byte aligned!
2677 if (addr >= EEPROMVSIZE || (addr & 3))
2681 * Wait for any previous operation which may still be in flight to
2684 ret = t4_seeprom_wait(adapter);
2686 CH_ERR(adapter, "VPD still busy from previous operation\n");
2691 * Issue our new VPD Read request, mark the VPD as being busy and wait
2692 * for our request to complete. If it doesn't complete, note the
2693 * error and return it to our caller. Note that we do not reset the
2696 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2697 adapter->vpd_busy = 1;
2698 adapter->vpd_flag = PCI_VPD_ADDR_F;
2699 ret = t4_seeprom_wait(adapter);
2701 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2706 * Grab the returned data, swizzle it into our endianess and
2709 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2710 *data = le32_to_cpu(*data);
2715 * t4_seeprom_write - write a serial EEPROM location
2716 * @adapter: adapter to write
2717 * @addr: virtual EEPROM address
2718 * @data: value to write
2720 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2721 * VPD capability. Note that this function must be called with a virtual
2724 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2726 unsigned int base = adapter->params.pci.vpd_cap_addr;
2732 * VPD Accesses must alway be 4-byte aligned!
2734 if (addr >= EEPROMVSIZE || (addr & 3))
2738 * Wait for any previous operation which may still be in flight to
2741 ret = t4_seeprom_wait(adapter);
2743 CH_ERR(adapter, "VPD still busy from previous operation\n");
2748 * Issue our new VPD Read request, mark the VPD as being busy and wait
2749 * for our request to complete. If it doesn't complete, note the
2750 * error and return it to our caller. Note that we do not reset the
2753 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2755 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2756 (u16)addr | PCI_VPD_ADDR_F);
2757 adapter->vpd_busy = 1;
2758 adapter->vpd_flag = 0;
2759 ret = t4_seeprom_wait(adapter);
2761 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2766 * Reset PCI_VPD_DATA register after a transaction and wait for our
2767 * request to complete. If it doesn't complete, return error.
2769 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2770 max_poll = EEPROM_MAX_POLL;
2772 udelay(EEPROM_DELAY);
2773 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2774 } while ((stats_reg & 0x1) && --max_poll);
2778 /* Return success! */
2783 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2784 * @phys_addr: the physical EEPROM address
2785 * @fn: the PCI function number
2786 * @sz: size of function-specific area
2788 * Translate a physical EEPROM address to virtual. The first 1K is
2789 * accessed through virtual addresses starting at 31K, the rest is
2790 * accessed through virtual addresses starting at 0.
2792 * The mapping is as follows:
2793 * [0..1K) -> [31K..32K)
2794 * [1K..1K+A) -> [ES-A..ES)
2795 * [1K+A..ES) -> [0..ES-A-1K)
2797 * where A = @fn * @sz, and ES = EEPROM size.
2799 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2802 if (phys_addr < 1024)
2803 return phys_addr + (31 << 10);
2804 if (phys_addr < 1024 + fn)
2805 return EEPROMSIZE - fn + phys_addr - 1024;
2806 if (phys_addr < EEPROMSIZE)
2807 return phys_addr - 1024 - fn;
2812 * t4_seeprom_wp - enable/disable EEPROM write protection
2813 * @adapter: the adapter
2814 * @enable: whether to enable or disable write protection
2816 * Enables or disables write protection on the serial EEPROM.
2818 int t4_seeprom_wp(struct adapter *adapter, int enable)
2820 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2824 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2825 * @v: Pointer to buffered vpd data structure
2826 * @kw: The keyword to search for
2828 * Returns the value of the information field keyword or
2829 * -ENOENT otherwise.
2831 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2834 unsigned int offset , len;
2835 const u8 *buf = (const u8 *)v;
2836 const u8 *vpdr_len = &v->vpdr_len[0];
2837 offset = sizeof(struct t4_vpd_hdr);
2838 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2840 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2844 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2845 if(memcmp(buf + i , kw , 2) == 0){
2846 i += VPD_INFO_FLD_HDR_SIZE;
2850 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2858 * get_vpd_params - read VPD parameters from VPD EEPROM
2859 * @adapter: adapter to read
2860 * @p: where to store the parameters
2861 * @vpd: caller provided temporary space to read the VPD into
2863 * Reads card parameters stored in VPD EEPROM.
2865 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2871 const struct t4_vpd_hdr *v;
2874 * Card information normally starts at VPD_BASE but early cards had
2877 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2882 * The VPD shall have a unique identifier specified by the PCI SIG.
2883 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2884 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2885 * is expected to automatically put this entry at the
2886 * beginning of the VPD.
2888 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2890 for (i = 0; i < VPD_LEN; i += 4) {
2891 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2895 v = (const struct t4_vpd_hdr *)vpd;
2897 #define FIND_VPD_KW(var,name) do { \
2898 var = get_vpd_keyword_val(v , name); \
2900 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2905 FIND_VPD_KW(i, "RV");
2906 for (csum = 0; i >= 0; i--)
2911 "corrupted VPD EEPROM, actual csum %u\n", csum);
2915 FIND_VPD_KW(ec, "EC");
2916 FIND_VPD_KW(sn, "SN");
2917 FIND_VPD_KW(pn, "PN");
2918 FIND_VPD_KW(na, "NA");
2921 memcpy(p->id, v->id_data, ID_LEN);
2923 memcpy(p->ec, vpd + ec, EC_LEN);
2925 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
2926 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2928 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
2929 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2930 strstrip((char *)p->pn);
2931 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
2932 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2933 strstrip((char *)p->na);
2938 /* serial flash and firmware constants and flash config file constants */
2940 SF_ATTEMPTS = 10, /* max retries for SF operations */
2942 /* flash command opcodes */
2943 SF_PROG_PAGE = 2, /* program page */
2944 SF_WR_DISABLE = 4, /* disable writes */
2945 SF_RD_STATUS = 5, /* read status register */
2946 SF_WR_ENABLE = 6, /* enable writes */
2947 SF_RD_DATA_FAST = 0xb, /* read flash */
2948 SF_RD_ID = 0x9f, /* read ID */
2949 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2953 * sf1_read - read data from the serial flash
2954 * @adapter: the adapter
2955 * @byte_cnt: number of bytes to read
2956 * @cont: whether another operation will be chained
2957 * @lock: whether to lock SF for PL access only
2958 * @valp: where to store the read data
2960 * Reads up to 4 bytes of data from the serial flash. The location of
2961 * the read needs to be specified prior to calling this by issuing the
2962 * appropriate commands to the serial flash.
2964 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2965 int lock, u32 *valp)
2969 if (!byte_cnt || byte_cnt > 4)
2971 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2973 t4_write_reg(adapter, A_SF_OP,
2974 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2975 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2977 *valp = t4_read_reg(adapter, A_SF_DATA);
2982 * sf1_write - write data to the serial flash
2983 * @adapter: the adapter
2984 * @byte_cnt: number of bytes to write
2985 * @cont: whether another operation will be chained
2986 * @lock: whether to lock SF for PL access only
2987 * @val: value to write
2989 * Writes up to 4 bytes of data to the serial flash. The location of
2990 * the write needs to be specified prior to calling this by issuing the
2991 * appropriate commands to the serial flash.
2993 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2996 if (!byte_cnt || byte_cnt > 4)
2998 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3000 t4_write_reg(adapter, A_SF_DATA, val);
3001 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3002 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3003 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3007 * flash_wait_op - wait for a flash operation to complete
3008 * @adapter: the adapter
3009 * @attempts: max number of polls of the status register
3010 * @delay: delay between polls in ms
3012 * Wait for a flash operation to complete by polling the status register.
3014 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3020 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3021 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3025 if (--attempts == 0)
3033 * t4_read_flash - read words from serial flash
3034 * @adapter: the adapter
3035 * @addr: the start address for the read
3036 * @nwords: how many 32-bit words to read
3037 * @data: where to store the read data
3038 * @byte_oriented: whether to store data as bytes or as words
3040 * Read the specified number of 32-bit words from the serial flash.
3041 * If @byte_oriented is set the read data is stored as a byte array
3042 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3043 * natural endianness.
3045 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3046 unsigned int nwords, u32 *data, int byte_oriented)
3050 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3053 addr = swab32(addr) | SF_RD_DATA_FAST;
3055 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3056 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3059 for ( ; nwords; nwords--, data++) {
3060 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3062 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3066 *data = (__force __u32)(cpu_to_be32(*data));
3072 * t4_write_flash - write up to a page of data to the serial flash
3073 * @adapter: the adapter
3074 * @addr: the start address to write
3075 * @n: length of data to write in bytes
3076 * @data: the data to write
3077 * @byte_oriented: whether to store data as bytes or as words
3079 * Writes up to a page of data (256 bytes) to the serial flash starting
3080 * at the given address. All the data must be written to the same page.
3081 * If @byte_oriented is set the write data is stored as byte stream
3082 * (i.e. matches what on disk), otherwise in big-endian.
3084 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3085 unsigned int n, const u8 *data, int byte_oriented)
3088 u32 buf[SF_PAGE_SIZE / 4];
3089 unsigned int i, c, left, val, offset = addr & 0xff;
3091 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3094 val = swab32(addr) | SF_PROG_PAGE;
3096 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3097 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3100 for (left = n; left; left -= c) {
3102 for (val = 0, i = 0; i < c; ++i)
3103 val = (val << 8) + *data++;
3106 val = cpu_to_be32(val);
3108 ret = sf1_write(adapter, c, c != left, 1, val);
3112 ret = flash_wait_op(adapter, 8, 1);
3116 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3118 /* Read the page to verify the write succeeded */
3119 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3124 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3126 "failed to correctly write the flash page at %#x\n",
3133 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3138 * t4_get_fw_version - read the firmware version
3139 * @adapter: the adapter
3140 * @vers: where to place the version
3142 * Reads the FW version from flash.
3144 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3146 return t4_read_flash(adapter, FLASH_FW_START +
3147 offsetof(struct fw_hdr, fw_ver), 1,
3152 * t4_get_tp_version - read the TP microcode version
3153 * @adapter: the adapter
3154 * @vers: where to place the version
3156 * Reads the TP microcode version from flash.
3158 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3160 return t4_read_flash(adapter, FLASH_FW_START +
3161 offsetof(struct fw_hdr, tp_microcode_ver),
3166 * t4_get_exprom_version - return the Expansion ROM version (if any)
3167 * @adapter: the adapter
3168 * @vers: where to place the version
3170 * Reads the Expansion ROM header from FLASH and returns the version
3171 * number (if present) through the @vers return value pointer. We return
3172 * this in the Firmware Version Format since it's convenient. Return
3173 * 0 on success, -ENOENT if no Expansion ROM is present.
3175 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3177 struct exprom_header {
3178 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3179 unsigned char hdr_ver[4]; /* Expansion ROM version */
3181 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3185 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3186 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3191 hdr = (struct exprom_header *)exprom_header_buf;
3192 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3195 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3196 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3197 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3198 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3203 * t4_flash_erase_sectors - erase a range of flash sectors
3204 * @adapter: the adapter
3205 * @start: the first sector to erase
3206 * @end: the last sector to erase
3208 * Erases the sectors in the given inclusive range.
3210 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3214 if (end >= adapter->params.sf_nsec)
3217 while (start <= end) {
3218 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3219 (ret = sf1_write(adapter, 4, 0, 1,
3220 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3221 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3223 "erase of flash sector %d failed, error %d\n",
3229 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3234 * t4_flash_cfg_addr - return the address of the flash configuration file
3235 * @adapter: the adapter
3237 * Return the address within the flash where the Firmware Configuration
3238 * File is stored, or an error if the device FLASH is too small to contain
3239 * a Firmware Configuration File.
3241 int t4_flash_cfg_addr(struct adapter *adapter)
3244 * If the device FLASH isn't large enough to hold a Firmware
3245 * Configuration File, return an error.
3247 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3250 return FLASH_CFG_START;
3254 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3255 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3256 * and emit an error message for mismatched firmware to save our caller the
3259 static int t4_fw_matches_chip(struct adapter *adap,
3260 const struct fw_hdr *hdr)
3263 * The expression below will return FALSE for any unsupported adapter
3264 * which will keep us "honest" in the future ...
3266 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3267 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3268 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3272 "FW image (%d) is not suitable for this adapter (%d)\n",
3273 hdr->chip, chip_id(adap));
3278 * t4_load_fw - download firmware
3279 * @adap: the adapter
3280 * @fw_data: the firmware image to write
3283 * Write the supplied firmware image to the card's serial flash.
3285 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3290 u8 first_page[SF_PAGE_SIZE];
3291 const u32 *p = (const u32 *)fw_data;
3292 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3293 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3294 unsigned int fw_start_sec;
3295 unsigned int fw_start;
3296 unsigned int fw_size;
3298 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3299 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3300 fw_start = FLASH_FWBOOTSTRAP_START;
3301 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3303 fw_start_sec = FLASH_FW_START_SEC;
3304 fw_start = FLASH_FW_START;
3305 fw_size = FLASH_FW_MAX_SIZE;
3309 CH_ERR(adap, "FW image has no data\n");
3314 "FW image size not multiple of 512 bytes\n");
3317 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3319 "FW image size differs from size in FW header\n");
3322 if (size > fw_size) {
3323 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3327 if (!t4_fw_matches_chip(adap, hdr))
3330 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3331 csum += be32_to_cpu(p[i]);
3333 if (csum != 0xffffffff) {
3335 "corrupted firmware image, checksum %#x\n", csum);
3339 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3340 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3345 * We write the correct version at the end so the driver can see a bad
3346 * version if the FW write fails. Start by writing a copy of the
3347 * first page with a bad version.
3349 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3350 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3351 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3356 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3357 addr += SF_PAGE_SIZE;
3358 fw_data += SF_PAGE_SIZE;
3359 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3364 ret = t4_write_flash(adap,
3365 fw_start + offsetof(struct fw_hdr, fw_ver),
3366 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3369 CH_ERR(adap, "firmware download failed, error %d\n",
3375 * t4_fwcache - firmware cache operation
3376 * @adap: the adapter
3377 * @op : the operation (flush or flush and invalidate)
3379 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3381 struct fw_params_cmd c;
3383 memset(&c, 0, sizeof(c));
3385 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3386 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3387 V_FW_PARAMS_CMD_PFN(adap->pf) |
3388 V_FW_PARAMS_CMD_VFN(0));
3389 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3391 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3392 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3393 c.param[0].val = (__force __be32)op;
3395 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3398 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3399 unsigned int *pif_req_wrptr,
3400 unsigned int *pif_rsp_wrptr)
3403 u32 cfg, val, req, rsp;
3405 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3406 if (cfg & F_LADBGEN)
3407 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3409 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3410 req = G_POLADBGWRPTR(val);
3411 rsp = G_PILADBGWRPTR(val);
3413 *pif_req_wrptr = req;
3415 *pif_rsp_wrptr = rsp;
3417 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3418 for (j = 0; j < 6; j++) {
3419 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3420 V_PILADBGRDPTR(rsp));
3421 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3422 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3426 req = (req + 2) & M_POLADBGRDPTR;
3427 rsp = (rsp + 2) & M_PILADBGRDPTR;
3429 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3432 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3437 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3438 if (cfg & F_LADBGEN)
3439 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3441 for (i = 0; i < CIM_MALA_SIZE; i++) {
3442 for (j = 0; j < 5; j++) {
3444 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3445 V_PILADBGRDPTR(idx));
3446 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3447 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3450 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3453 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3457 for (i = 0; i < 8; i++) {
3458 u32 *p = la_buf + i;
3460 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3461 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3462 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3463 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3464 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3468 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3469 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
3470 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
3473 * t4_link_l1cfg - apply link configuration to MAC/PHY
3474 * @phy: the PHY to setup
3475 * @mac: the MAC to setup
3476 * @lc: the requested link configuration
3478 * Set up a port's MAC and PHY according to a desired link configuration.
3479 * - If the PHY can auto-negotiate first decide what to advertise, then
3480 * enable/disable auto-negotiation as desired, and reset.
3481 * - If the PHY does not auto-negotiate just reset it.
3482 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3483 * otherwise do it later based on the outcome of auto-negotiation.
3485 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3486 struct link_config *lc)
3488 struct fw_port_cmd c;
3489 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3492 if (lc->requested_fc & PAUSE_RX)
3493 fc |= FW_PORT_CAP_FC_RX;
3494 if (lc->requested_fc & PAUSE_TX)
3495 fc |= FW_PORT_CAP_FC_TX;
3497 memset(&c, 0, sizeof(c));
3498 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3499 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3500 V_FW_PORT_CMD_PORTID(port));
3502 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3505 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3506 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3508 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3509 } else if (lc->autoneg == AUTONEG_DISABLE) {
3510 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
3511 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3513 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
3515 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3519 * t4_restart_aneg - restart autonegotiation
3520 * @adap: the adapter
3521 * @mbox: mbox to use for the FW command
3522 * @port: the port id
3524 * Restarts autonegotiation for the selected port.
3526 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3528 struct fw_port_cmd c;
3530 memset(&c, 0, sizeof(c));
3531 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3532 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3533 V_FW_PORT_CMD_PORTID(port));
3535 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3537 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3538 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3541 typedef void (*int_handler_t)(struct adapter *adap);
3544 unsigned int mask; /* bits to check in interrupt status */
3545 const char *msg; /* message to print or NULL */
3546 short stat_idx; /* stat counter to increment or -1 */
3547 unsigned short fatal; /* whether the condition reported is fatal */
3548 int_handler_t int_handler; /* platform-specific int handler */
3552 * t4_handle_intr_status - table driven interrupt handler
3553 * @adapter: the adapter that generated the interrupt
3554 * @reg: the interrupt status register to process
3555 * @acts: table of interrupt actions
3557 * A table driven interrupt handler that applies a set of masks to an
3558 * interrupt status word and performs the corresponding actions if the
3559 * interrupts described by the mask have occurred. The actions include
3560 * optionally emitting a warning or alert message. The table is terminated
3561 * by an entry specifying mask 0. Returns the number of fatal interrupt
3564 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3565 const struct intr_info *acts)
3568 unsigned int mask = 0;
3569 unsigned int status = t4_read_reg(adapter, reg);
3571 for ( ; acts->mask; ++acts) {
3572 if (!(status & acts->mask))
3576 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3577 status & acts->mask);
3578 } else if (acts->msg)
3579 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3580 status & acts->mask);
3581 if (acts->int_handler)
3582 acts->int_handler(adapter);
3586 if (status) /* clear processed interrupts */
3587 t4_write_reg(adapter, reg, status);
3592 * Interrupt handler for the PCIE module.
3594 static void pcie_intr_handler(struct adapter *adapter)
3596 static const struct intr_info sysbus_intr_info[] = {
3597 { F_RNPP, "RXNP array parity error", -1, 1 },
3598 { F_RPCP, "RXPC array parity error", -1, 1 },
3599 { F_RCIP, "RXCIF array parity error", -1, 1 },
3600 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3601 { F_RFTP, "RXFT array parity error", -1, 1 },
3604 static const struct intr_info pcie_port_intr_info[] = {
3605 { F_TPCP, "TXPC array parity error", -1, 1 },
3606 { F_TNPP, "TXNP array parity error", -1, 1 },
3607 { F_TFTP, "TXFT array parity error", -1, 1 },
3608 { F_TCAP, "TXCA array parity error", -1, 1 },
3609 { F_TCIP, "TXCIF array parity error", -1, 1 },
3610 { F_RCAP, "RXCA array parity error", -1, 1 },
3611 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3612 { F_RDPE, "Rx data parity error", -1, 1 },
3613 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
3616 static const struct intr_info pcie_intr_info[] = {
3617 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3618 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3619 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3620 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3621 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3622 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3623 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3624 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3625 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3626 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3627 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3628 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3629 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3630 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3631 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3632 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3633 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3634 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3635 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3636 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3637 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3638 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3639 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3640 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3641 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3642 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3643 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3644 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3645 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3646 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3651 static const struct intr_info t5_pcie_intr_info[] = {
3652 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3654 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3655 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3656 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3657 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3658 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3659 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3660 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3662 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3664 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3665 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3666 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3667 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3668 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3670 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3671 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3672 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3673 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3674 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3675 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3676 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3677 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3678 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3679 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3680 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3682 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3684 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3685 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3686 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3687 { F_READRSPERR, "Outbound read error", -1,
3695 fat = t4_handle_intr_status(adapter,
3696 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3698 t4_handle_intr_status(adapter,
3699 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3700 pcie_port_intr_info) +
3701 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3704 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3707 t4_fatal_err(adapter);
3711 * TP interrupt handler.
3713 static void tp_intr_handler(struct adapter *adapter)
3715 static const struct intr_info tp_intr_info[] = {
3716 { 0x3fffffff, "TP parity error", -1, 1 },
3717 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3721 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3722 t4_fatal_err(adapter);
3726 * SGE interrupt handler.
3728 static void sge_intr_handler(struct adapter *adapter)
3733 static const struct intr_info sge_intr_info[] = {
3734 { F_ERR_CPL_EXCEED_IQE_SIZE,
3735 "SGE received CPL exceeding IQE size", -1, 1 },
3736 { F_ERR_INVALID_CIDX_INC,
3737 "SGE GTS CIDX increment too large", -1, 0 },
3738 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3739 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3740 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3741 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3742 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3744 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3746 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3748 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3750 { F_ERR_ING_CTXT_PRIO,
3751 "SGE too many priority ingress contexts", -1, 0 },
3752 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3753 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3757 static const struct intr_info t4t5_sge_intr_info[] = {
3758 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3759 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3760 { F_ERR_EGR_CTXT_PRIO,
3761 "SGE too many priority egress contexts", -1, 0 },
3766 * For now, treat below interrupts as fatal so that we disable SGE and
3767 * get better debug */
3768 static const struct intr_info t6_sge_intr_info[] = {
3769 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
3770 "SGE PCIe error for a DBP thread", -1, 1 },
3772 "SGE Actual WRE packet is less than advertized length",
3777 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
3778 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
3780 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
3781 (unsigned long long)v);
3782 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
3783 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
3786 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
3787 if (chip_id(adapter) <= CHELSIO_T5)
3788 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3789 t4t5_sge_intr_info);
3791 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3794 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
3795 if (err & F_ERROR_QID_VALID) {
3796 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
3797 if (err & F_UNCAPTURED_ERROR)
3798 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
3799 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
3800 F_UNCAPTURED_ERROR);
3804 t4_fatal_err(adapter);
3807 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
3808 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
3809 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
3810 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
3813 * CIM interrupt handler.
3815 static void cim_intr_handler(struct adapter *adapter)
3817 static const struct intr_info cim_intr_info[] = {
3818 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
3819 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3820 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3821 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
3822 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
3823 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
3824 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
3827 static const struct intr_info cim_upintr_info[] = {
3828 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
3829 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
3830 { F_ILLWRINT, "CIM illegal write", -1, 1 },
3831 { F_ILLRDINT, "CIM illegal read", -1, 1 },
3832 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
3833 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
3834 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
3835 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
3836 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
3837 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
3838 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
3839 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
3840 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
3841 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
3842 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
3843 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
3844 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
3845 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
3846 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
3847 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
3848 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
3849 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
3850 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
3851 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
3852 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
3853 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
3854 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
3855 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
3860 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
3861 t4_report_fw_error(adapter);
3863 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
3865 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
3868 t4_fatal_err(adapter);
3872 * ULP RX interrupt handler.
3874 static void ulprx_intr_handler(struct adapter *adapter)
3876 static const struct intr_info ulprx_intr_info[] = {
3877 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
3878 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
3879 { 0x7fffff, "ULPRX parity error", -1, 1 },
3883 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
3884 t4_fatal_err(adapter);
3888 * ULP TX interrupt handler.
3890 static void ulptx_intr_handler(struct adapter *adapter)
3892 static const struct intr_info ulptx_intr_info[] = {
3893 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
3895 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
3897 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
3899 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
3901 { 0xfffffff, "ULPTX parity error", -1, 1 },
3905 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
3906 t4_fatal_err(adapter);
3910 * PM TX interrupt handler.
3912 static void pmtx_intr_handler(struct adapter *adapter)
3914 static const struct intr_info pmtx_intr_info[] = {
3915 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
3916 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
3917 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
3918 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
3919 { 0xffffff0, "PMTX framing error", -1, 1 },
3920 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
3921 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
3923 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
3924 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
3928 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
3929 t4_fatal_err(adapter);
3933 * PM RX interrupt handler.
3935 static void pmrx_intr_handler(struct adapter *adapter)
3937 static const struct intr_info pmrx_intr_info[] = {
3938 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
3939 { 0x3ffff0, "PMRX framing error", -1, 1 },
3940 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
3941 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
3943 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
3944 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
3948 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
3949 t4_fatal_err(adapter);
3953 * CPL switch interrupt handler.
3955 static void cplsw_intr_handler(struct adapter *adapter)
3957 static const struct intr_info cplsw_intr_info[] = {
3958 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
3959 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
3960 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
3961 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
3962 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
3963 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
3967 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
3968 t4_fatal_err(adapter);
3972 * LE interrupt handler.
3974 static void le_intr_handler(struct adapter *adap)
3976 unsigned int chip_ver = chip_id(adap);
3977 static const struct intr_info le_intr_info[] = {
3978 { F_LIPMISS, "LE LIP miss", -1, 0 },
3979 { F_LIP0, "LE 0 LIP error", -1, 0 },
3980 { F_PARITYERR, "LE parity error", -1, 1 },
3981 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
3982 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
3986 static const struct intr_info t6_le_intr_info[] = {
3987 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
3988 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
3989 { F_TCAMINTPERR, "LE parity error", -1, 1 },
3990 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
3991 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
3995 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
3996 (chip_ver <= CHELSIO_T5) ?
3997 le_intr_info : t6_le_intr_info))
4002 * MPS interrupt handler.
4004 static void mps_intr_handler(struct adapter *adapter)
4006 static const struct intr_info mps_rx_intr_info[] = {
4007 { 0xffffff, "MPS Rx parity error", -1, 1 },
4010 static const struct intr_info mps_tx_intr_info[] = {
4011 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4012 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4013 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4015 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4017 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4018 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4019 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4022 static const struct intr_info mps_trc_intr_info[] = {
4023 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4024 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4026 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4029 static const struct intr_info mps_stat_sram_intr_info[] = {
4030 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4033 static const struct intr_info mps_stat_tx_intr_info[] = {
4034 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4037 static const struct intr_info mps_stat_rx_intr_info[] = {
4038 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4041 static const struct intr_info mps_cls_intr_info[] = {
4042 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4043 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4044 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4050 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4052 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4054 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4055 mps_trc_intr_info) +
4056 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4057 mps_stat_sram_intr_info) +
4058 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4059 mps_stat_tx_intr_info) +
4060 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4061 mps_stat_rx_intr_info) +
4062 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4065 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4066 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4068 t4_fatal_err(adapter);
4071 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4075 * EDC/MC interrupt handler.
4077 static void mem_intr_handler(struct adapter *adapter, int idx)
4079 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4081 unsigned int addr, cnt_addr, v;
4083 if (idx <= MEM_EDC1) {
4084 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4085 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4086 } else if (idx == MEM_MC) {
4087 if (is_t4(adapter)) {
4088 addr = A_MC_INT_CAUSE;
4089 cnt_addr = A_MC_ECC_STATUS;
4091 addr = A_MC_P_INT_CAUSE;
4092 cnt_addr = A_MC_P_ECC_STATUS;
4095 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4096 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4099 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4100 if (v & F_PERR_INT_CAUSE)
4101 CH_ALERT(adapter, "%s FIFO parity error\n",
4103 if (v & F_ECC_CE_INT_CAUSE) {
4104 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4106 t4_edc_err_read(adapter, idx);
4108 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4109 CH_WARN_RATELIMIT(adapter,
4110 "%u %s correctable ECC data error%s\n",
4111 cnt, name[idx], cnt > 1 ? "s" : "");
4113 if (v & F_ECC_UE_INT_CAUSE)
4115 "%s uncorrectable ECC data error\n", name[idx]);
4117 t4_write_reg(adapter, addr, v);
4118 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4119 t4_fatal_err(adapter);
4123 * MA interrupt handler.
4125 static void ma_intr_handler(struct adapter *adapter)
4127 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4129 if (status & F_MEM_PERR_INT_CAUSE) {
4131 "MA parity error, parity status %#x\n",
4132 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4135 "MA parity error, parity status %#x\n",
4136 t4_read_reg(adapter,
4137 A_MA_PARITY_ERROR_STATUS2));
4139 if (status & F_MEM_WRAP_INT_CAUSE) {
4140 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4141 CH_ALERT(adapter, "MA address wrap-around error by "
4142 "client %u to address %#x\n",
4143 G_MEM_WRAP_CLIENT_NUM(v),
4144 G_MEM_WRAP_ADDRESS(v) << 4);
4146 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4147 t4_fatal_err(adapter);
4151 * SMB interrupt handler.
4153 static void smb_intr_handler(struct adapter *adap)
4155 static const struct intr_info smb_intr_info[] = {
4156 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4157 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4158 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4162 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4167 * NC-SI interrupt handler.
4169 static void ncsi_intr_handler(struct adapter *adap)
4171 static const struct intr_info ncsi_intr_info[] = {
4172 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4173 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4174 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4175 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4179 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4184 * XGMAC interrupt handler.
4186 static void xgmac_intr_handler(struct adapter *adap, int port)
4188 u32 v, int_cause_reg;
4191 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4193 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4195 v = t4_read_reg(adap, int_cause_reg);
4197 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4201 if (v & F_TXFIFO_PRTY_ERR)
4202 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4204 if (v & F_RXFIFO_PRTY_ERR)
4205 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4207 t4_write_reg(adap, int_cause_reg, v);
4212 * PL interrupt handler.
4214 static void pl_intr_handler(struct adapter *adap)
4216 static const struct intr_info pl_intr_info[] = {
4217 { F_FATALPERR, "Fatal parity error", -1, 1 },
4218 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4222 static const struct intr_info t5_pl_intr_info[] = {
4223 { F_FATALPERR, "Fatal parity error", -1, 1 },
4227 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4229 pl_intr_info : t5_pl_intr_info))
4233 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4236 * t4_slow_intr_handler - control path interrupt handler
4237 * @adapter: the adapter
4239 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4240 * The designation 'slow' is because it involves register reads, while
4241 * data interrupts typically don't involve any MMIOs.
4243 int t4_slow_intr_handler(struct adapter *adapter)
4245 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4247 if (!(cause & GLBL_INTR_MASK))
4250 cim_intr_handler(adapter);
4252 mps_intr_handler(adapter);
4254 ncsi_intr_handler(adapter);
4256 pl_intr_handler(adapter);
4258 smb_intr_handler(adapter);
4260 xgmac_intr_handler(adapter, 0);
4262 xgmac_intr_handler(adapter, 1);
4264 xgmac_intr_handler(adapter, 2);
4266 xgmac_intr_handler(adapter, 3);
4268 pcie_intr_handler(adapter);
4270 mem_intr_handler(adapter, MEM_MC);
4271 if (is_t5(adapter) && (cause & F_MC1))
4272 mem_intr_handler(adapter, MEM_MC1);
4274 mem_intr_handler(adapter, MEM_EDC0);
4276 mem_intr_handler(adapter, MEM_EDC1);
4278 le_intr_handler(adapter);
4280 tp_intr_handler(adapter);
4282 ma_intr_handler(adapter);
4283 if (cause & F_PM_TX)
4284 pmtx_intr_handler(adapter);
4285 if (cause & F_PM_RX)
4286 pmrx_intr_handler(adapter);
4287 if (cause & F_ULP_RX)
4288 ulprx_intr_handler(adapter);
4289 if (cause & F_CPL_SWITCH)
4290 cplsw_intr_handler(adapter);
4292 sge_intr_handler(adapter);
4293 if (cause & F_ULP_TX)
4294 ulptx_intr_handler(adapter);
4296 /* Clear the interrupts just processed for which we are the master. */
4297 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4298 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4303 * t4_intr_enable - enable interrupts
4304 * @adapter: the adapter whose interrupts should be enabled
4306 * Enable PF-specific interrupts for the calling function and the top-level
4307 * interrupt concentrator for global interrupts. Interrupts are already
4308 * enabled at each module, here we just enable the roots of the interrupt
4311 * Note: this function should be called only when the driver manages
4312 * non PF-specific interrupts from the various HW modules. Only one PCI
4313 * function at a time should be doing this.
4315 void t4_intr_enable(struct adapter *adapter)
4318 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4319 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4320 ? G_SOURCEPF(whoami)
4321 : G_T6_SOURCEPF(whoami));
4323 if (chip_id(adapter) <= CHELSIO_T5)
4324 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4326 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4327 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4328 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4329 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4330 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4331 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4332 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4333 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4334 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4335 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4339 * t4_intr_disable - disable interrupts
4340 * @adapter: the adapter whose interrupts should be disabled
4342 * Disable interrupts. We only disable the top-level interrupt
4343 * concentrators. The caller must be a PCI function managing global
4346 void t4_intr_disable(struct adapter *adapter)
4348 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4349 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4350 ? G_SOURCEPF(whoami)
4351 : G_T6_SOURCEPF(whoami));
4353 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4354 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4358 * t4_intr_clear - clear all interrupts
4359 * @adapter: the adapter whose interrupts should be cleared
4361 * Clears all interrupts. The caller must be a PCI function managing
4362 * global interrupts.
4364 void t4_intr_clear(struct adapter *adapter)
4366 static const unsigned int cause_reg[] = {
4367 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4368 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4369 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4370 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4371 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4372 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4374 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4375 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4376 A_MPS_RX_PERR_INT_CAUSE,
4378 MYPF_REG(A_PL_PF_INT_CAUSE),
4385 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4386 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4388 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4389 A_MC_P_INT_CAUSE, 0xffffffff);
4391 if (is_t4(adapter)) {
4392 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4394 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4397 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4399 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4400 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4404 * hash_mac_addr - return the hash value of a MAC address
4405 * @addr: the 48-bit Ethernet MAC address
4407 * Hashes a MAC address according to the hash function used by HW inexact
4408 * (hash) address matching.
4410 static int hash_mac_addr(const u8 *addr)
4412 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4413 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4421 * t4_config_rss_range - configure a portion of the RSS mapping table
4422 * @adapter: the adapter
4423 * @mbox: mbox to use for the FW command
4424 * @viid: virtual interface whose RSS subtable is to be written
4425 * @start: start entry in the table to write
4426 * @n: how many table entries to write
4427 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4428 * @nrspq: number of values in @rspq
4430 * Programs the selected part of the VI's RSS mapping table with the
4431 * provided values. If @nrspq < @n the supplied values are used repeatedly
4432 * until the full table range is populated.
4434 * The caller must ensure the values in @rspq are in the range allowed for
4437 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4438 int start, int n, const u16 *rspq, unsigned int nrspq)
4441 const u16 *rsp = rspq;
4442 const u16 *rsp_end = rspq + nrspq;
4443 struct fw_rss_ind_tbl_cmd cmd;
4445 memset(&cmd, 0, sizeof(cmd));
4446 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4447 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4448 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4449 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4452 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4453 * Queue Identifiers. These Ingress Queue IDs are packed three to
4454 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4458 int nq = min(n, 32);
4460 __be32 *qp = &cmd.iq0_to_iq2;
4463 * Set up the firmware RSS command header to send the next
4464 * "nq" Ingress Queue IDs to the firmware.
4466 cmd.niqid = cpu_to_be16(nq);
4467 cmd.startidx = cpu_to_be16(start);
4470 * "nq" more done for the start of the next loop.
4476 * While there are still Ingress Queue IDs to stuff into the
4477 * current firmware RSS command, retrieve them from the
4478 * Ingress Queue ID array and insert them into the command.
4482 * Grab up to the next 3 Ingress Queue IDs (wrapping
4483 * around the Ingress Queue ID array if necessary) and
4484 * insert them into the firmware RSS command at the
4485 * current 3-tuple position within the commad.
4489 int nqbuf = min(3, nq);
4492 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4493 while (nqbuf && nq_packed < 32) {
4500 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4501 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4502 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4506 * Send this portion of the RRS table update to the firmware;
4507 * bail out on any errors.
4509 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4517 * t4_config_glbl_rss - configure the global RSS mode
4518 * @adapter: the adapter
4519 * @mbox: mbox to use for the FW command
4520 * @mode: global RSS mode
4521 * @flags: mode-specific flags
4523 * Sets the global RSS mode.
4525 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4528 struct fw_rss_glb_config_cmd c;
4530 memset(&c, 0, sizeof(c));
4531 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4532 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4533 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4534 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4535 c.u.manual.mode_pkd =
4536 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4537 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4538 c.u.basicvirtual.mode_pkd =
4539 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4540 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4543 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4547 * t4_config_vi_rss - configure per VI RSS settings
4548 * @adapter: the adapter
4549 * @mbox: mbox to use for the FW command
4552 * @defq: id of the default RSS queue for the VI.
4554 * Configures VI-specific RSS properties.
4556 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4557 unsigned int flags, unsigned int defq)
4559 struct fw_rss_vi_config_cmd c;
4561 memset(&c, 0, sizeof(c));
4562 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4563 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4564 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4565 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4566 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4567 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4568 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4571 /* Read an RSS table row */
4572 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4574 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4575 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4580 * t4_read_rss - read the contents of the RSS mapping table
4581 * @adapter: the adapter
4582 * @map: holds the contents of the RSS mapping table
4584 * Reads the contents of the RSS hash->queue mapping table.
4586 int t4_read_rss(struct adapter *adapter, u16 *map)
4591 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4592 ret = rd_rss_row(adapter, i, &val);
4595 *map++ = G_LKPTBLQUEUE0(val);
4596 *map++ = G_LKPTBLQUEUE1(val);
4602 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4603 * @adap: the adapter
4604 * @vals: where the indirect register values are stored/written
4605 * @nregs: how many indirect registers to read/write
4606 * @start_idx: index of first indirect register to read/write
4607 * @rw: Read (1) or Write (0)
4609 * Access TP PIO registers through LDST
4611 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4612 unsigned int start_index, unsigned int rw)
4615 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4616 struct fw_ldst_cmd c;
4618 for (i = 0 ; i < nregs; i++) {
4619 memset(&c, 0, sizeof(c));
4620 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4622 (rw ? F_FW_CMD_READ :
4624 V_FW_LDST_CMD_ADDRSPACE(cmd));
4625 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4627 c.u.addrval.addr = cpu_to_be32(start_index + i);
4628 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4629 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4632 vals[i] = be32_to_cpu(c.u.addrval.val);
4638 * t4_read_rss_key - read the global RSS key
4639 * @adap: the adapter
4640 * @key: 10-entry array holding the 320-bit RSS key
4642 * Reads the global 320-bit RSS key.
4644 void t4_read_rss_key(struct adapter *adap, u32 *key)
4646 if (t4_use_ldst(adap))
4647 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
4649 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4650 A_TP_RSS_SECRET_KEY0);
4654 * t4_write_rss_key - program one of the RSS keys
4655 * @adap: the adapter
4656 * @key: 10-entry array holding the 320-bit RSS key
4657 * @idx: which RSS key to write
4659 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4660 * 0..15 the corresponding entry in the RSS key table is written,
4661 * otherwise the global RSS key is written.
4663 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4665 u8 rss_key_addr_cnt = 16;
4666 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4669 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4670 * allows access to key addresses 16-63 by using KeyWrAddrX
4671 * as index[5:4](upper 2) into key table
4673 if ((chip_id(adap) > CHELSIO_T5) &&
4674 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4675 rss_key_addr_cnt = 32;
4677 if (t4_use_ldst(adap))
4678 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4680 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4681 A_TP_RSS_SECRET_KEY0);
4683 if (idx >= 0 && idx < rss_key_addr_cnt) {
4684 if (rss_key_addr_cnt > 16)
4685 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4686 V_KEYWRADDRX(idx >> 4) |
4687 V_T6_VFWRADDR(idx) | F_KEYWREN);
4689 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4690 V_KEYWRADDR(idx) | F_KEYWREN);
4695 * t4_read_rss_pf_config - read PF RSS Configuration Table
4696 * @adapter: the adapter
4697 * @index: the entry in the PF RSS table to read
4698 * @valp: where to store the returned value
4700 * Reads the PF RSS Configuration Table at the specified index and returns
4701 * the value found there.
4703 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4706 if (t4_use_ldst(adapter))
4707 t4_fw_tp_pio_rw(adapter, valp, 1,
4708 A_TP_RSS_PF0_CONFIG + index, 1);
4710 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4711 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4715 * t4_write_rss_pf_config - write PF RSS Configuration Table
4716 * @adapter: the adapter
4717 * @index: the entry in the VF RSS table to read
4718 * @val: the value to store
4720 * Writes the PF RSS Configuration Table at the specified index with the
4723 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4726 if (t4_use_ldst(adapter))
4727 t4_fw_tp_pio_rw(adapter, &val, 1,
4728 A_TP_RSS_PF0_CONFIG + index, 0);
4730 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4731 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4735 * t4_read_rss_vf_config - read VF RSS Configuration Table
4736 * @adapter: the adapter
4737 * @index: the entry in the VF RSS table to read
4738 * @vfl: where to store the returned VFL
4739 * @vfh: where to store the returned VFH
4741 * Reads the VF RSS Configuration Table at the specified index and returns
4742 * the (VFL, VFH) values found there.
4744 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4747 u32 vrt, mask, data;
4749 if (chip_id(adapter) <= CHELSIO_T5) {
4750 mask = V_VFWRADDR(M_VFWRADDR);
4751 data = V_VFWRADDR(index);
4753 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4754 data = V_T6_VFWRADDR(index);
4757 * Request that the index'th VF Table values be read into VFL/VFH.
4759 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4760 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4761 vrt |= data | F_VFRDEN;
4762 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4765 * Grab the VFL/VFH values ...
4767 if (t4_use_ldst(adapter)) {
4768 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
4769 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
4771 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4772 vfl, 1, A_TP_RSS_VFL_CONFIG);
4773 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4774 vfh, 1, A_TP_RSS_VFH_CONFIG);
4779 * t4_write_rss_vf_config - write VF RSS Configuration Table
4781 * @adapter: the adapter
4782 * @index: the entry in the VF RSS table to write
4783 * @vfl: the VFL to store
4784 * @vfh: the VFH to store
4786 * Writes the VF RSS Configuration Table at the specified index with the
4787 * specified (VFL, VFH) values.
4789 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
4792 u32 vrt, mask, data;
4794 if (chip_id(adapter) <= CHELSIO_T5) {
4795 mask = V_VFWRADDR(M_VFWRADDR);
4796 data = V_VFWRADDR(index);
4798 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4799 data = V_T6_VFWRADDR(index);
4803 * Load up VFL/VFH with the values to be written ...
4805 if (t4_use_ldst(adapter)) {
4806 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
4807 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
4809 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4810 &vfl, 1, A_TP_RSS_VFL_CONFIG);
4811 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4812 &vfh, 1, A_TP_RSS_VFH_CONFIG);
4816 * Write the VFL/VFH into the VF Table at index'th location.
4818 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4819 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4820 vrt |= data | F_VFRDEN;
4821 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4825 * t4_read_rss_pf_map - read PF RSS Map
4826 * @adapter: the adapter
4828 * Reads the PF RSS Map register and returns its value.
4830 u32 t4_read_rss_pf_map(struct adapter *adapter)
4834 if (t4_use_ldst(adapter))
4835 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
4837 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4838 &pfmap, 1, A_TP_RSS_PF_MAP);
4843 * t4_write_rss_pf_map - write PF RSS Map
4844 * @adapter: the adapter
4845 * @pfmap: PF RSS Map value
4847 * Writes the specified value to the PF RSS Map register.
4849 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
4851 if (t4_use_ldst(adapter))
4852 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
4854 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4855 &pfmap, 1, A_TP_RSS_PF_MAP);
4859 * t4_read_rss_pf_mask - read PF RSS Mask
4860 * @adapter: the adapter
4862 * Reads the PF RSS Mask register and returns its value.
4864 u32 t4_read_rss_pf_mask(struct adapter *adapter)
4868 if (t4_use_ldst(adapter))
4869 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
4871 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4872 &pfmask, 1, A_TP_RSS_PF_MSK);
4877 * t4_write_rss_pf_mask - write PF RSS Mask
4878 * @adapter: the adapter
4879 * @pfmask: PF RSS Mask value
4881 * Writes the specified value to the PF RSS Mask register.
4883 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
4885 if (t4_use_ldst(adapter))
4886 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
4888 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4889 &pfmask, 1, A_TP_RSS_PF_MSK);
4893 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
4894 * @adap: the adapter
4895 * @v4: holds the TCP/IP counter values
4896 * @v6: holds the TCP/IPv6 counter values
4898 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
4899 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
4901 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
4902 struct tp_tcp_stats *v6)
4904 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
4906 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
4907 #define STAT(x) val[STAT_IDX(x)]
4908 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
4911 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4912 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
4913 v4->tcp_out_rsts = STAT(OUT_RST);
4914 v4->tcp_in_segs = STAT64(IN_SEG);
4915 v4->tcp_out_segs = STAT64(OUT_SEG);
4916 v4->tcp_retrans_segs = STAT64(RXT_SEG);
4919 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4920 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
4921 v6->tcp_out_rsts = STAT(OUT_RST);
4922 v6->tcp_in_segs = STAT64(IN_SEG);
4923 v6->tcp_out_segs = STAT64(OUT_SEG);
4924 v6->tcp_retrans_segs = STAT64(RXT_SEG);
4932 * t4_tp_get_err_stats - read TP's error MIB counters
4933 * @adap: the adapter
4934 * @st: holds the counter values
4936 * Returns the values of TP's error counters.
4938 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
4940 int nchan = adap->chip_params->nchan;
4942 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4943 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
4944 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4945 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
4946 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4947 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
4948 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4949 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
4950 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4951 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
4952 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4953 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
4954 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4955 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
4956 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4957 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
4959 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4960 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
4964 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
4965 * @adap: the adapter
4966 * @st: holds the counter values
4968 * Returns the values of TP's proxy counters.
4970 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
4972 int nchan = adap->chip_params->nchan;
4974 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
4975 nchan, A_TP_MIB_TNL_LPBK_0);
4979 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
4980 * @adap: the adapter
4981 * @st: holds the counter values
4983 * Returns the values of TP's CPL counters.
4985 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
4987 int nchan = adap->chip_params->nchan;
4989 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
4990 nchan, A_TP_MIB_CPL_IN_REQ_0);
4991 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
4992 nchan, A_TP_MIB_CPL_OUT_RSP_0);
4996 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
4997 * @adap: the adapter
4998 * @st: holds the counter values
5000 * Returns the values of TP's RDMA counters.
5002 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5004 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5005 2, A_TP_MIB_RQE_DFR_PKT);
5009 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5010 * @adap: the adapter
5011 * @idx: the port index
5012 * @st: holds the counter values
5014 * Returns the values of TP's FCoE counters for the selected port.
5016 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5017 struct tp_fcoe_stats *st)
5021 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5022 1, A_TP_MIB_FCOE_DDP_0 + idx);
5023 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5024 1, A_TP_MIB_FCOE_DROP_0 + idx);
5025 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5026 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
5027 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5031 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5032 * @adap: the adapter
5033 * @st: holds the counter values
5035 * Returns the values of TP's counters for non-TCP directly-placed packets.
5037 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5041 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5043 st->frames = val[0];
5045 st->octets = ((u64)val[2] << 32) | val[3];
5049 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5050 * @adap: the adapter
5051 * @mtus: where to store the MTU values
5052 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5054 * Reads the HW path MTU table.
5056 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5061 for (i = 0; i < NMTUS; ++i) {
5062 t4_write_reg(adap, A_TP_MTU_TABLE,
5063 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5064 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5065 mtus[i] = G_MTUVALUE(v);
5067 mtu_log[i] = G_MTUWIDTH(v);
5072 * t4_read_cong_tbl - reads the congestion control table
5073 * @adap: the adapter
5074 * @incr: where to store the alpha values
5076 * Reads the additive increments programmed into the HW congestion
5079 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5081 unsigned int mtu, w;
5083 for (mtu = 0; mtu < NMTUS; ++mtu)
5084 for (w = 0; w < NCCTRL_WIN; ++w) {
5085 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5086 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5087 incr[mtu][w] = (u16)t4_read_reg(adap,
5088 A_TP_CCTRL_TABLE) & 0x1fff;
5093 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5094 * @adap: the adapter
5095 * @addr: the indirect TP register address
5096 * @mask: specifies the field within the register to modify
5097 * @val: new value for the field
5099 * Sets a field of an indirect TP register to the given value.
5101 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5102 unsigned int mask, unsigned int val)
5104 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5105 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5106 t4_write_reg(adap, A_TP_PIO_DATA, val);
5110 * init_cong_ctrl - initialize congestion control parameters
5111 * @a: the alpha values for congestion control
5112 * @b: the beta values for congestion control
5114 * Initialize the congestion control parameters.
5116 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5118 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5143 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5146 b[13] = b[14] = b[15] = b[16] = 3;
5147 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5148 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5153 /* The minimum additive increment value for the congestion control table */
5154 #define CC_MIN_INCR 2U
5157 * t4_load_mtus - write the MTU and congestion control HW tables
5158 * @adap: the adapter
5159 * @mtus: the values for the MTU table
5160 * @alpha: the values for the congestion control alpha parameter
5161 * @beta: the values for the congestion control beta parameter
5163 * Write the HW MTU table with the supplied MTUs and the high-speed
5164 * congestion control table with the supplied alpha, beta, and MTUs.
5165 * We write the two tables together because the additive increments
5166 * depend on the MTUs.
5168 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5169 const unsigned short *alpha, const unsigned short *beta)
5171 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5172 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5173 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5174 28672, 40960, 57344, 81920, 114688, 163840, 229376
5179 for (i = 0; i < NMTUS; ++i) {
5180 unsigned int mtu = mtus[i];
5181 unsigned int log2 = fls(mtu);
5183 if (!(mtu & ((1 << log2) >> 2))) /* round */
5185 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5186 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5188 for (w = 0; w < NCCTRL_WIN; ++w) {
5191 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5194 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5195 (w << 16) | (beta[w] << 13) | inc);
5201 * t4_set_pace_tbl - set the pace table
5202 * @adap: the adapter
5203 * @pace_vals: the pace values in microseconds
5204 * @start: index of the first entry in the HW pace table to set
5205 * @n: how many entries to set
5207 * Sets (a subset of the) HW pace table.
5209 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5210 unsigned int start, unsigned int n)
5212 unsigned int vals[NTX_SCHED], i;
5213 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5218 /* convert values from us to dack ticks, rounding to closest value */
5219 for (i = 0; i < n; i++, pace_vals++) {
5220 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5221 if (vals[i] > 0x7ff)
5223 if (*pace_vals && vals[i] == 0)
5226 for (i = 0; i < n; i++, start++)
5227 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5232 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5233 * @adap: the adapter
5234 * @kbps: target rate in Kbps
5235 * @sched: the scheduler index
5237 * Configure a Tx HW scheduler for the target rate.
5239 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5241 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5242 unsigned int clk = adap->params.vpd.cclk * 1000;
5243 unsigned int selected_cpt = 0, selected_bpt = 0;
5246 kbps *= 125; /* -> bytes */
5247 for (cpt = 1; cpt <= 255; cpt++) {
5249 bpt = (kbps + tps / 2) / tps;
5250 if (bpt > 0 && bpt <= 255) {
5252 delta = v >= kbps ? v - kbps : kbps - v;
5253 if (delta < mindelta) {
5258 } else if (selected_cpt)
5264 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5265 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5266 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5268 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5270 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5271 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5276 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5277 * @adap: the adapter
5278 * @sched: the scheduler index
5279 * @ipg: the interpacket delay in tenths of nanoseconds
5281 * Set the interpacket delay for a HW packet rate scheduler.
5283 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5285 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5287 /* convert ipg to nearest number of core clocks */
5288 ipg *= core_ticks_per_usec(adap);
5289 ipg = (ipg + 5000) / 10000;
5290 if (ipg > M_TXTIMERSEPQ0)
5293 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5294 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5296 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5298 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5299 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5300 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5305 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5306 * clocks. The formula is
5308 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5310 * which is equivalent to
5312 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5314 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5316 u64 v = bytes256 * adap->params.vpd.cclk;
5318 return v * 62 + v / 2;
5322 * t4_get_chan_txrate - get the current per channel Tx rates
5323 * @adap: the adapter
5324 * @nic_rate: rates for NIC traffic
5325 * @ofld_rate: rates for offloaded traffic
5327 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5330 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5334 v = t4_read_reg(adap, A_TP_TX_TRATE);
5335 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5336 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5337 if (adap->chip_params->nchan > 2) {
5338 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5339 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5342 v = t4_read_reg(adap, A_TP_TX_ORATE);
5343 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5344 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5345 if (adap->chip_params->nchan > 2) {
5346 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5347 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5352 * t4_set_trace_filter - configure one of the tracing filters
5353 * @adap: the adapter
5354 * @tp: the desired trace filter parameters
5355 * @idx: which filter to configure
5356 * @enable: whether to enable or disable the filter
5358 * Configures one of the tracing filters available in HW. If @tp is %NULL
5359 * it indicates that the filter is already written in the register and it
5360 * just needs to be enabled or disabled.
5362 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5363 int idx, int enable)
5365 int i, ofst = idx * 4;
5366 u32 data_reg, mask_reg, cfg;
5367 u32 multitrc = F_TRCMULTIFILTER;
5368 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5370 if (idx < 0 || idx >= NTRACE)
5373 if (tp == NULL || !enable) {
5374 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5380 * TODO - After T4 data book is updated, specify the exact
5383 * See T4 data book - MPS section for a complete description
5384 * of the below if..else handling of A_MPS_TRC_CFG register
5387 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5388 if (cfg & F_TRCMULTIFILTER) {
5390 * If multiple tracers are enabled, then maximum
5391 * capture size is 2.5KB (FIFO size of a single channel)
5392 * minus 2 flits for CPL_TRACE_PKT header.
5394 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5398 * If multiple tracers are disabled, to avoid deadlocks
5399 * maximum packet capture size of 9600 bytes is recommended.
5400 * Also in this mode, only trace0 can be enabled and running.
5403 if (tp->snap_len > 9600 || idx)
5407 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5408 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5409 tp->min_len > M_TFMINPKTSIZE)
5412 /* stop the tracer we'll be changing */
5413 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5415 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5416 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5417 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5419 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5420 t4_write_reg(adap, data_reg, tp->data[i]);
5421 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5423 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5424 V_TFCAPTUREMAX(tp->snap_len) |
5425 V_TFMINPKTSIZE(tp->min_len));
5426 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5427 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5429 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5430 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5436 * t4_get_trace_filter - query one of the tracing filters
5437 * @adap: the adapter
5438 * @tp: the current trace filter parameters
5439 * @idx: which trace filter to query
5440 * @enabled: non-zero if the filter is enabled
5442 * Returns the current settings of one of the HW tracing filters.
5444 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5448 int i, ofst = idx * 4;
5449 u32 data_reg, mask_reg;
5451 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5452 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5455 *enabled = !!(ctla & F_TFEN);
5456 tp->port = G_TFPORT(ctla);
5457 tp->invert = !!(ctla & F_TFINVERTMATCH);
5459 *enabled = !!(ctla & F_T5_TFEN);
5460 tp->port = G_T5_TFPORT(ctla);
5461 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5463 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5464 tp->min_len = G_TFMINPKTSIZE(ctlb);
5465 tp->skip_ofst = G_TFOFFSET(ctla);
5466 tp->skip_len = G_TFLENGTH(ctla);
5468 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5469 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5470 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5472 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5473 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5474 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5479 * t4_pmtx_get_stats - returns the HW stats from PMTX
5480 * @adap: the adapter
5481 * @cnt: where to store the count statistics
5482 * @cycles: where to store the cycle statistics
5484 * Returns performance statistics from PMTX.
5486 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5491 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5492 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5493 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5495 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5497 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5498 A_PM_TX_DBG_DATA, data, 2,
5499 A_PM_TX_DBG_STAT_MSB);
5500 cycles[i] = (((u64)data[0] << 32) | data[1]);
5506 * t4_pmrx_get_stats - returns the HW stats from PMRX
5507 * @adap: the adapter
5508 * @cnt: where to store the count statistics
5509 * @cycles: where to store the cycle statistics
5511 * Returns performance statistics from PMRX.
5513 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5518 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5519 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5520 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5522 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5524 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5525 A_PM_RX_DBG_DATA, data, 2,
5526 A_PM_RX_DBG_STAT_MSB);
5527 cycles[i] = (((u64)data[0] << 32) | data[1]);
5533 * t4_get_mps_bg_map - return the buffer groups associated with a port
5534 * @adap: the adapter
5535 * @idx: the port index
5537 * Returns a bitmap indicating which MPS buffer groups are associated
5538 * with the given port. Bit i is set if buffer group i is used by the
5541 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5543 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5546 return idx == 0 ? 0xf : 0;
5547 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5548 return idx < 2 ? (3 << (2 * idx)) : 0;
5553 * t4_get_port_type_description - return Port Type string description
5554 * @port_type: firmware Port Type enumeration
5556 const char *t4_get_port_type_description(enum fw_port_type port_type)
5558 static const char *const port_type_description[] = {
5577 if (port_type < ARRAY_SIZE(port_type_description))
5578 return port_type_description[port_type];
5583 * t4_get_port_stats_offset - collect port stats relative to a previous
5585 * @adap: The adapter
5587 * @stats: Current stats to fill
5588 * @offset: Previous stats snapshot
5590 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5591 struct port_stats *stats,
5592 struct port_stats *offset)
5597 t4_get_port_stats(adap, idx, stats);
5598 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5599 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5605 * t4_get_port_stats - collect port statistics
5606 * @adap: the adapter
5607 * @idx: the port index
5608 * @p: the stats structure to fill
5610 * Collect statistics related to the given port from HW.
5612 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5614 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5616 #define GET_STAT(name) \
5617 t4_read_reg64(adap, \
5618 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5619 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5620 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5622 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5623 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5624 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5625 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5626 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5627 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5628 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5629 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5630 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5631 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5632 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5633 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5634 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5635 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5636 p->tx_drop = GET_STAT(TX_PORT_DROP);
5637 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5638 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5639 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5640 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5641 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5642 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5643 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5644 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
5646 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5647 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5648 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5649 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5650 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5651 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5652 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5653 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5654 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5655 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5656 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5657 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5658 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5659 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5660 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5661 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5662 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5663 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5664 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5665 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5666 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5667 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5668 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5669 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5670 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5671 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5672 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
5674 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5675 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5676 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5677 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5678 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5679 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5680 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5681 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5688 * t4_get_lb_stats - collect loopback port statistics
5689 * @adap: the adapter
5690 * @idx: the loopback port index
5691 * @p: the stats structure to fill
5693 * Return HW statistics for the given loopback port.
5695 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5697 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5699 #define GET_STAT(name) \
5700 t4_read_reg64(adap, \
5702 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5703 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5704 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5706 p->octets = GET_STAT(BYTES);
5707 p->frames = GET_STAT(FRAMES);
5708 p->bcast_frames = GET_STAT(BCAST);
5709 p->mcast_frames = GET_STAT(MCAST);
5710 p->ucast_frames = GET_STAT(UCAST);
5711 p->error_frames = GET_STAT(ERROR);
5713 p->frames_64 = GET_STAT(64B);
5714 p->frames_65_127 = GET_STAT(65B_127B);
5715 p->frames_128_255 = GET_STAT(128B_255B);
5716 p->frames_256_511 = GET_STAT(256B_511B);
5717 p->frames_512_1023 = GET_STAT(512B_1023B);
5718 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5719 p->frames_1519_max = GET_STAT(1519B_MAX);
5720 p->drop = GET_STAT(DROP_FRAMES);
5722 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5723 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5724 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5725 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5726 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5727 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5728 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5729 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5736 * t4_wol_magic_enable - enable/disable magic packet WoL
5737 * @adap: the adapter
5738 * @port: the physical port index
5739 * @addr: MAC address expected in magic packets, %NULL to disable
5741 * Enables/disables magic packet wake-on-LAN for the selected port.
5743 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5746 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
5749 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5750 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5751 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5753 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5754 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5755 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5759 t4_write_reg(adap, mag_id_reg_l,
5760 (addr[2] << 24) | (addr[3] << 16) |
5761 (addr[4] << 8) | addr[5]);
5762 t4_write_reg(adap, mag_id_reg_h,
5763 (addr[0] << 8) | addr[1]);
5765 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5766 V_MAGICEN(addr != NULL));
5770 * t4_wol_pat_enable - enable/disable pattern-based WoL
5771 * @adap: the adapter
5772 * @port: the physical port index
5773 * @map: bitmap of which HW pattern filters to set
5774 * @mask0: byte mask for bytes 0-63 of a packet
5775 * @mask1: byte mask for bytes 64-127 of a packet
5776 * @crc: Ethernet CRC for selected bytes
5777 * @enable: enable/disable switch
5779 * Sets the pattern filters indicated in @map to mask out the bytes
5780 * specified in @mask0/@mask1 in received packets and compare the CRC of
5781 * the resulting packet against @crc. If @enable is %true pattern-based
5782 * WoL is enabled, otherwise disabled.
5784 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
5785 u64 mask0, u64 mask1, unsigned int crc, bool enable)
5791 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5793 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5796 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
5802 #define EPIO_REG(name) \
5803 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
5804 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
5806 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
5807 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
5808 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
5810 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
5814 /* write byte masks */
5815 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
5816 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
5817 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
5818 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
5822 t4_write_reg(adap, EPIO_REG(DATA0), crc);
5823 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
5824 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
5825 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
5830 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
5834 /* t4_mk_filtdelwr - create a delete filter WR
5835 * @ftid: the filter ID
5836 * @wr: the filter work request to populate
5837 * @qid: ingress queue to receive the delete notification
5839 * Creates a filter work request to delete the supplied filter. If @qid is
5840 * negative the delete notification is suppressed.
5842 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
5844 memset(wr, 0, sizeof(*wr));
5845 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
5846 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
5847 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
5848 V_FW_FILTER_WR_NOREPLY(qid < 0));
5849 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
5851 wr->rx_chan_rx_rpl_iq =
5852 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
5855 #define INIT_CMD(var, cmd, rd_wr) do { \
5856 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
5857 F_FW_CMD_REQUEST | \
5858 F_FW_CMD_##rd_wr); \
5859 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
5862 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
5866 struct fw_ldst_cmd c;
5868 memset(&c, 0, sizeof(c));
5869 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
5870 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5874 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5875 c.u.addrval.addr = cpu_to_be32(addr);
5876 c.u.addrval.val = cpu_to_be32(val);
5878 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5882 * t4_mdio_rd - read a PHY register through MDIO
5883 * @adap: the adapter
5884 * @mbox: mailbox to use for the FW command
5885 * @phy_addr: the PHY address
5886 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5887 * @reg: the register to read
5888 * @valp: where to store the value
5890 * Issues a FW command through the given mailbox to read a PHY register.
5892 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5893 unsigned int mmd, unsigned int reg, unsigned int *valp)
5897 struct fw_ldst_cmd c;
5899 memset(&c, 0, sizeof(c));
5900 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
5901 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5902 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5904 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5905 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5906 V_FW_LDST_CMD_MMD(mmd));
5907 c.u.mdio.raddr = cpu_to_be16(reg);
5909 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5911 *valp = be16_to_cpu(c.u.mdio.rval);
5916 * t4_mdio_wr - write a PHY register through MDIO
5917 * @adap: the adapter
5918 * @mbox: mailbox to use for the FW command
5919 * @phy_addr: the PHY address
5920 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5921 * @reg: the register to write
5922 * @valp: value to write
5924 * Issues a FW command through the given mailbox to write a PHY register.
5926 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5927 unsigned int mmd, unsigned int reg, unsigned int val)
5930 struct fw_ldst_cmd c;
5932 memset(&c, 0, sizeof(c));
5933 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
5934 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5935 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5937 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5938 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5939 V_FW_LDST_CMD_MMD(mmd));
5940 c.u.mdio.raddr = cpu_to_be16(reg);
5941 c.u.mdio.rval = cpu_to_be16(val);
5943 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5948 * t4_sge_decode_idma_state - decode the idma state
5949 * @adap: the adapter
5950 * @state: the state idma is stuck in
5952 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
5954 static const char * const t4_decode[] = {
5956 "IDMA_PUSH_MORE_CPL_FIFO",
5957 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5959 "IDMA_PHYSADDR_SEND_PCIEHDR",
5960 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5961 "IDMA_PHYSADDR_SEND_PAYLOAD",
5962 "IDMA_SEND_FIFO_TO_IMSG",
5963 "IDMA_FL_REQ_DATA_FL_PREP",
5964 "IDMA_FL_REQ_DATA_FL",
5966 "IDMA_FL_H_REQ_HEADER_FL",
5967 "IDMA_FL_H_SEND_PCIEHDR",
5968 "IDMA_FL_H_PUSH_CPL_FIFO",
5969 "IDMA_FL_H_SEND_CPL",
5970 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5971 "IDMA_FL_H_SEND_IP_HDR",
5972 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5973 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5974 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5975 "IDMA_FL_D_SEND_PCIEHDR",
5976 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5977 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5978 "IDMA_FL_SEND_PCIEHDR",
5979 "IDMA_FL_PUSH_CPL_FIFO",
5981 "IDMA_FL_SEND_PAYLOAD_FIRST",
5982 "IDMA_FL_SEND_PAYLOAD",
5983 "IDMA_FL_REQ_NEXT_DATA_FL",
5984 "IDMA_FL_SEND_NEXT_PCIEHDR",
5985 "IDMA_FL_SEND_PADDING",
5986 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5987 "IDMA_FL_SEND_FIFO_TO_IMSG",
5988 "IDMA_FL_REQ_DATAFL_DONE",
5989 "IDMA_FL_REQ_HEADERFL_DONE",
5991 static const char * const t5_decode[] = {
5994 "IDMA_PUSH_MORE_CPL_FIFO",
5995 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5996 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
5997 "IDMA_PHYSADDR_SEND_PCIEHDR",
5998 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5999 "IDMA_PHYSADDR_SEND_PAYLOAD",
6000 "IDMA_SEND_FIFO_TO_IMSG",
6001 "IDMA_FL_REQ_DATA_FL",
6003 "IDMA_FL_DROP_SEND_INC",
6004 "IDMA_FL_H_REQ_HEADER_FL",
6005 "IDMA_FL_H_SEND_PCIEHDR",
6006 "IDMA_FL_H_PUSH_CPL_FIFO",
6007 "IDMA_FL_H_SEND_CPL",
6008 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6009 "IDMA_FL_H_SEND_IP_HDR",
6010 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6011 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6012 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6013 "IDMA_FL_D_SEND_PCIEHDR",
6014 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6015 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6016 "IDMA_FL_SEND_PCIEHDR",
6017 "IDMA_FL_PUSH_CPL_FIFO",
6019 "IDMA_FL_SEND_PAYLOAD_FIRST",
6020 "IDMA_FL_SEND_PAYLOAD",
6021 "IDMA_FL_REQ_NEXT_DATA_FL",
6022 "IDMA_FL_SEND_NEXT_PCIEHDR",
6023 "IDMA_FL_SEND_PADDING",
6024 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6026 static const char * const t6_decode[] = {
6028 "IDMA_PUSH_MORE_CPL_FIFO",
6029 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6030 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6031 "IDMA_PHYSADDR_SEND_PCIEHDR",
6032 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6033 "IDMA_PHYSADDR_SEND_PAYLOAD",
6034 "IDMA_FL_REQ_DATA_FL",
6036 "IDMA_FL_DROP_SEND_INC",
6037 "IDMA_FL_H_REQ_HEADER_FL",
6038 "IDMA_FL_H_SEND_PCIEHDR",
6039 "IDMA_FL_H_PUSH_CPL_FIFO",
6040 "IDMA_FL_H_SEND_CPL",
6041 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6042 "IDMA_FL_H_SEND_IP_HDR",
6043 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6044 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6045 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6046 "IDMA_FL_D_SEND_PCIEHDR",
6047 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6048 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6049 "IDMA_FL_SEND_PCIEHDR",
6050 "IDMA_FL_PUSH_CPL_FIFO",
6052 "IDMA_FL_SEND_PAYLOAD_FIRST",
6053 "IDMA_FL_SEND_PAYLOAD",
6054 "IDMA_FL_REQ_NEXT_DATA_FL",
6055 "IDMA_FL_SEND_NEXT_PCIEHDR",
6056 "IDMA_FL_SEND_PADDING",
6057 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6059 static const u32 sge_regs[] = {
6060 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6061 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6062 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6064 const char * const *sge_idma_decode;
6065 int sge_idma_decode_nstates;
6067 unsigned int chip_version = chip_id(adapter);
6069 /* Select the right set of decode strings to dump depending on the
6070 * adapter chip type.
6072 switch (chip_version) {
6074 sge_idma_decode = (const char * const *)t4_decode;
6075 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6079 sge_idma_decode = (const char * const *)t5_decode;
6080 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6084 sge_idma_decode = (const char * const *)t6_decode;
6085 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6089 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6093 if (state < sge_idma_decode_nstates)
6094 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6096 CH_WARN(adapter, "idma state %d unknown\n", state);
6098 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6099 CH_WARN(adapter, "SGE register %#x value %#x\n",
6100 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6104 * t4_sge_ctxt_flush - flush the SGE context cache
6105 * @adap: the adapter
6106 * @mbox: mailbox to use for the FW command
6108 * Issues a FW command through the given mailbox to flush the
6109 * SGE context cache.
6111 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6115 struct fw_ldst_cmd c;
6117 memset(&c, 0, sizeof(c));
6118 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6119 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6120 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6122 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6123 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6125 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6130 * t4_fw_hello - establish communication with FW
6131 * @adap: the adapter
6132 * @mbox: mailbox to use for the FW command
6133 * @evt_mbox: mailbox to receive async FW events
6134 * @master: specifies the caller's willingness to be the device master
6135 * @state: returns the current device state (if non-NULL)
6137 * Issues a command to establish communication with FW. Returns either
6138 * an error (negative integer) or the mailbox of the Master PF.
6140 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6141 enum dev_master master, enum dev_state *state)
6144 struct fw_hello_cmd c;
6146 unsigned int master_mbox;
6147 int retries = FW_CMD_HELLO_RETRIES;
6150 memset(&c, 0, sizeof(c));
6151 INIT_CMD(c, HELLO, WRITE);
6152 c.err_to_clearinit = cpu_to_be32(
6153 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6154 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6155 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6156 mbox : M_FW_HELLO_CMD_MBMASTER) |
6157 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6158 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6159 F_FW_HELLO_CMD_CLEARINIT);
6162 * Issue the HELLO command to the firmware. If it's not successful
6163 * but indicates that we got a "busy" or "timeout" condition, retry
6164 * the HELLO until we exhaust our retry limit. If we do exceed our
6165 * retry limit, check to see if the firmware left us any error
6166 * information and report that if so ...
6168 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6169 if (ret != FW_SUCCESS) {
6170 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6172 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6173 t4_report_fw_error(adap);
6177 v = be32_to_cpu(c.err_to_clearinit);
6178 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6180 if (v & F_FW_HELLO_CMD_ERR)
6181 *state = DEV_STATE_ERR;
6182 else if (v & F_FW_HELLO_CMD_INIT)
6183 *state = DEV_STATE_INIT;
6185 *state = DEV_STATE_UNINIT;
6189 * If we're not the Master PF then we need to wait around for the
6190 * Master PF Driver to finish setting up the adapter.
6192 * Note that we also do this wait if we're a non-Master-capable PF and
6193 * there is no current Master PF; a Master PF may show up momentarily
6194 * and we wouldn't want to fail pointlessly. (This can happen when an
6195 * OS loads lots of different drivers rapidly at the same time). In
6196 * this case, the Master PF returned by the firmware will be
6197 * M_PCIE_FW_MASTER so the test below will work ...
6199 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6200 master_mbox != mbox) {
6201 int waiting = FW_CMD_HELLO_TIMEOUT;
6204 * Wait for the firmware to either indicate an error or
6205 * initialized state. If we see either of these we bail out
6206 * and report the issue to the caller. If we exhaust the
6207 * "hello timeout" and we haven't exhausted our retries, try
6208 * again. Otherwise bail with a timeout error.
6217 * If neither Error nor Initialialized are indicated
6218 * by the firmware keep waiting till we exhaust our
6219 * timeout ... and then retry if we haven't exhausted
6222 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6223 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6234 * We either have an Error or Initialized condition
6235 * report errors preferentially.
6238 if (pcie_fw & F_PCIE_FW_ERR)
6239 *state = DEV_STATE_ERR;
6240 else if (pcie_fw & F_PCIE_FW_INIT)
6241 *state = DEV_STATE_INIT;
6245 * If we arrived before a Master PF was selected and
6246 * there's not a valid Master PF, grab its identity
6249 if (master_mbox == M_PCIE_FW_MASTER &&
6250 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6251 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6260 * t4_fw_bye - end communication with FW
6261 * @adap: the adapter
6262 * @mbox: mailbox to use for the FW command
6264 * Issues a command to terminate communication with FW.
6266 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6268 struct fw_bye_cmd c;
6270 memset(&c, 0, sizeof(c));
6271 INIT_CMD(c, BYE, WRITE);
6272 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6276 * t4_fw_reset - issue a reset to FW
6277 * @adap: the adapter
6278 * @mbox: mailbox to use for the FW command
6279 * @reset: specifies the type of reset to perform
6281 * Issues a reset command of the specified type to FW.
6283 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6285 struct fw_reset_cmd c;
6287 memset(&c, 0, sizeof(c));
6288 INIT_CMD(c, RESET, WRITE);
6289 c.val = cpu_to_be32(reset);
6290 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6294 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6295 * @adap: the adapter
6296 * @mbox: mailbox to use for the FW RESET command (if desired)
6297 * @force: force uP into RESET even if FW RESET command fails
6299 * Issues a RESET command to firmware (if desired) with a HALT indication
6300 * and then puts the microprocessor into RESET state. The RESET command
6301 * will only be issued if a legitimate mailbox is provided (mbox <=
6302 * M_PCIE_FW_MASTER).
6304 * This is generally used in order for the host to safely manipulate the
6305 * adapter without fear of conflicting with whatever the firmware might
6306 * be doing. The only way out of this state is to RESTART the firmware
6309 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6314 * If a legitimate mailbox is provided, issue a RESET command
6315 * with a HALT indication.
6317 if (mbox <= M_PCIE_FW_MASTER) {
6318 struct fw_reset_cmd c;
6320 memset(&c, 0, sizeof(c));
6321 INIT_CMD(c, RESET, WRITE);
6322 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6323 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6324 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6328 * Normally we won't complete the operation if the firmware RESET
6329 * command fails but if our caller insists we'll go ahead and put the
6330 * uP into RESET. This can be useful if the firmware is hung or even
6331 * missing ... We'll have to take the risk of putting the uP into
6332 * RESET without the cooperation of firmware in that case.
6334 * We also force the firmware's HALT flag to be on in case we bypassed
6335 * the firmware RESET command above or we're dealing with old firmware
6336 * which doesn't have the HALT capability. This will serve as a flag
6337 * for the incoming firmware to know that it's coming out of a HALT
6338 * rather than a RESET ... if it's new enough to understand that ...
6340 if (ret == 0 || force) {
6341 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6342 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6347 * And we always return the result of the firmware RESET command
6348 * even when we force the uP into RESET ...
6354 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6355 * @adap: the adapter
6356 * @reset: if we want to do a RESET to restart things
6358 * Restart firmware previously halted by t4_fw_halt(). On successful
6359 * return the previous PF Master remains as the new PF Master and there
6360 * is no need to issue a new HELLO command, etc.
6362 * We do this in two ways:
6364 * 1. If we're dealing with newer firmware we'll simply want to take
6365 * the chip's microprocessor out of RESET. This will cause the
6366 * firmware to start up from its start vector. And then we'll loop
6367 * until the firmware indicates it's started again (PCIE_FW.HALT
6368 * reset to 0) or we timeout.
6370 * 2. If we're dealing with older firmware then we'll need to RESET
6371 * the chip since older firmware won't recognize the PCIE_FW.HALT
6372 * flag and automatically RESET itself on startup.
6374 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6378 * Since we're directing the RESET instead of the firmware
6379 * doing it automatically, we need to clear the PCIE_FW.HALT
6382 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6385 * If we've been given a valid mailbox, first try to get the
6386 * firmware to do the RESET. If that works, great and we can
6387 * return success. Otherwise, if we haven't been given a
6388 * valid mailbox or the RESET command failed, fall back to
6389 * hitting the chip with a hammer.
6391 if (mbox <= M_PCIE_FW_MASTER) {
6392 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6394 if (t4_fw_reset(adap, mbox,
6395 F_PIORST | F_PIORSTMODE) == 0)
6399 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6404 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6405 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6406 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6417 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6418 * @adap: the adapter
6419 * @mbox: mailbox to use for the FW RESET command (if desired)
6420 * @fw_data: the firmware image to write
6422 * @force: force upgrade even if firmware doesn't cooperate
6424 * Perform all of the steps necessary for upgrading an adapter's
6425 * firmware image. Normally this requires the cooperation of the
6426 * existing firmware in order to halt all existing activities
6427 * but if an invalid mailbox token is passed in we skip that step
6428 * (though we'll still put the adapter microprocessor into RESET in
6431 * On successful return the new firmware will have been loaded and
6432 * the adapter will have been fully RESET losing all previous setup
6433 * state. On unsuccessful return the adapter may be completely hosed ...
6434 * positive errno indicates that the adapter is ~probably~ intact, a
6435 * negative errno indicates that things are looking bad ...
6437 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6438 const u8 *fw_data, unsigned int size, int force)
6440 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6441 unsigned int bootstrap =
6442 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6445 if (!t4_fw_matches_chip(adap, fw_hdr))
6449 ret = t4_fw_halt(adap, mbox, force);
6450 if (ret < 0 && !force)
6454 ret = t4_load_fw(adap, fw_data, size);
6455 if (ret < 0 || bootstrap)
6459 * Older versions of the firmware don't understand the new
6460 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6461 * restart. So for newly loaded older firmware we'll have to do the
6462 * RESET for it so it starts up on a clean slate. We can tell if
6463 * the newly loaded firmware will handle this right by checking
6464 * its header flags to see if it advertises the capability.
6466 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6467 return t4_fw_restart(adap, mbox, reset);
6471 * t4_fw_initialize - ask FW to initialize the device
6472 * @adap: the adapter
6473 * @mbox: mailbox to use for the FW command
6475 * Issues a command to FW to partially initialize the device. This
6476 * performs initialization that generally doesn't depend on user input.
6478 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6480 struct fw_initialize_cmd c;
6482 memset(&c, 0, sizeof(c));
6483 INIT_CMD(c, INITIALIZE, WRITE);
6484 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6488 * t4_query_params_rw - query FW or device parameters
6489 * @adap: the adapter
6490 * @mbox: mailbox to use for the FW command
6493 * @nparams: the number of parameters
6494 * @params: the parameter names
6495 * @val: the parameter values
6496 * @rw: Write and read flag
6498 * Reads the value of FW or device parameters. Up to 7 parameters can be
6501 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6502 unsigned int vf, unsigned int nparams, const u32 *params,
6506 struct fw_params_cmd c;
6507 __be32 *p = &c.param[0].mnem;
6512 memset(&c, 0, sizeof(c));
6513 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6514 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6515 V_FW_PARAMS_CMD_PFN(pf) |
6516 V_FW_PARAMS_CMD_VFN(vf));
6517 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6519 for (i = 0; i < nparams; i++) {
6520 *p++ = cpu_to_be32(*params++);
6522 *p = cpu_to_be32(*(val + i));
6526 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6528 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6529 *val++ = be32_to_cpu(*p);
6533 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6534 unsigned int vf, unsigned int nparams, const u32 *params,
6537 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6541 * t4_set_params_timeout - sets FW or device parameters
6542 * @adap: the adapter
6543 * @mbox: mailbox to use for the FW command
6546 * @nparams: the number of parameters
6547 * @params: the parameter names
6548 * @val: the parameter values
6549 * @timeout: the timeout time
6551 * Sets the value of FW or device parameters. Up to 7 parameters can be
6552 * specified at once.
6554 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6555 unsigned int pf, unsigned int vf,
6556 unsigned int nparams, const u32 *params,
6557 const u32 *val, int timeout)
6559 struct fw_params_cmd c;
6560 __be32 *p = &c.param[0].mnem;
6565 memset(&c, 0, sizeof(c));
6566 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6567 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6568 V_FW_PARAMS_CMD_PFN(pf) |
6569 V_FW_PARAMS_CMD_VFN(vf));
6570 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6573 *p++ = cpu_to_be32(*params++);
6574 *p++ = cpu_to_be32(*val++);
6577 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6581 * t4_set_params - sets FW or device parameters
6582 * @adap: the adapter
6583 * @mbox: mailbox to use for the FW command
6586 * @nparams: the number of parameters
6587 * @params: the parameter names
6588 * @val: the parameter values
6590 * Sets the value of FW or device parameters. Up to 7 parameters can be
6591 * specified at once.
6593 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6594 unsigned int vf, unsigned int nparams, const u32 *params,
6597 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6598 FW_CMD_MAX_TIMEOUT);
6602 * t4_cfg_pfvf - configure PF/VF resource limits
6603 * @adap: the adapter
6604 * @mbox: mailbox to use for the FW command
6605 * @pf: the PF being configured
6606 * @vf: the VF being configured
6607 * @txq: the max number of egress queues
6608 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6609 * @rxqi: the max number of interrupt-capable ingress queues
6610 * @rxq: the max number of interruptless ingress queues
6611 * @tc: the PCI traffic class
6612 * @vi: the max number of virtual interfaces
6613 * @cmask: the channel access rights mask for the PF/VF
6614 * @pmask: the port access rights mask for the PF/VF
6615 * @nexact: the maximum number of exact MPS filters
6616 * @rcaps: read capabilities
6617 * @wxcaps: write/execute capabilities
6619 * Configures resource limits and capabilities for a physical or virtual
6622 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6623 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6624 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6625 unsigned int vi, unsigned int cmask, unsigned int pmask,
6626 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6628 struct fw_pfvf_cmd c;
6630 memset(&c, 0, sizeof(c));
6631 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6632 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6633 V_FW_PFVF_CMD_VFN(vf));
6634 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6635 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6636 V_FW_PFVF_CMD_NIQ(rxq));
6637 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6638 V_FW_PFVF_CMD_PMASK(pmask) |
6639 V_FW_PFVF_CMD_NEQ(txq));
6640 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6641 V_FW_PFVF_CMD_NVI(vi) |
6642 V_FW_PFVF_CMD_NEXACTF(nexact));
6643 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6644 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6645 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6646 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6650 * t4_alloc_vi_func - allocate a virtual interface
6651 * @adap: the adapter
6652 * @mbox: mailbox to use for the FW command
6653 * @port: physical port associated with the VI
6654 * @pf: the PF owning the VI
6655 * @vf: the VF owning the VI
6656 * @nmac: number of MAC addresses needed (1 to 5)
6657 * @mac: the MAC addresses of the VI
6658 * @rss_size: size of RSS table slice associated with this VI
6659 * @portfunc: which Port Application Function MAC Address is desired
6660 * @idstype: Intrusion Detection Type
6662 * Allocates a virtual interface for the given physical port. If @mac is
6663 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6664 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6665 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6666 * stored consecutively so the space needed is @nmac * 6 bytes.
6667 * Returns a negative error number or the non-negative VI id.
6669 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6670 unsigned int port, unsigned int pf, unsigned int vf,
6671 unsigned int nmac, u8 *mac, u16 *rss_size,
6672 unsigned int portfunc, unsigned int idstype)
6677 memset(&c, 0, sizeof(c));
6678 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6679 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6680 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6681 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6682 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6683 V_FW_VI_CMD_FUNC(portfunc));
6684 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6687 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6689 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6694 memcpy(mac, c.mac, sizeof(c.mac));
6697 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6699 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6701 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6703 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6707 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6708 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6712 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6713 * @adap: the adapter
6714 * @mbox: mailbox to use for the FW command
6715 * @port: physical port associated with the VI
6716 * @pf: the PF owning the VI
6717 * @vf: the VF owning the VI
6718 * @nmac: number of MAC addresses needed (1 to 5)
6719 * @mac: the MAC addresses of the VI
6720 * @rss_size: size of RSS table slice associated with this VI
6722 * backwards compatible and convieniance routine to allocate a Virtual
6723 * Interface with a Ethernet Port Application Function and Intrustion
6724 * Detection System disabled.
6726 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6727 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6730 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6735 * t4_free_vi - free a virtual interface
6736 * @adap: the adapter
6737 * @mbox: mailbox to use for the FW command
6738 * @pf: the PF owning the VI
6739 * @vf: the VF owning the VI
6740 * @viid: virtual interface identifiler
6742 * Free a previously allocated virtual interface.
6744 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6745 unsigned int vf, unsigned int viid)
6749 memset(&c, 0, sizeof(c));
6750 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6753 V_FW_VI_CMD_PFN(pf) |
6754 V_FW_VI_CMD_VFN(vf));
6755 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6756 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6758 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6762 * t4_set_rxmode - set Rx properties of a virtual interface
6763 * @adap: the adapter
6764 * @mbox: mailbox to use for the FW command
6766 * @mtu: the new MTU or -1
6767 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6768 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6769 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
6770 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
6771 * @sleep_ok: if true we may sleep while awaiting command completion
6773 * Sets Rx properties of a virtual interface.
6775 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
6776 int mtu, int promisc, int all_multi, int bcast, int vlanex,
6779 struct fw_vi_rxmode_cmd c;
6781 /* convert to FW values */
6783 mtu = M_FW_VI_RXMODE_CMD_MTU;
6785 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
6787 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
6789 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
6791 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
6793 memset(&c, 0, sizeof(c));
6794 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
6795 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6796 V_FW_VI_RXMODE_CMD_VIID(viid));
6797 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6799 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
6800 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
6801 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
6802 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
6803 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
6804 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6808 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
6809 * @adap: the adapter
6810 * @mbox: mailbox to use for the FW command
6812 * @free: if true any existing filters for this VI id are first removed
6813 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
6814 * @addr: the MAC address(es)
6815 * @idx: where to store the index of each allocated filter
6816 * @hash: pointer to hash address filter bitmap
6817 * @sleep_ok: call is allowed to sleep
6819 * Allocates an exact-match filter for each of the supplied addresses and
6820 * sets it to the corresponding address. If @idx is not %NULL it should
6821 * have at least @naddr entries, each of which will be set to the index of
6822 * the filter allocated for the corresponding MAC address. If a filter
6823 * could not be allocated for an address its index is set to 0xffff.
6824 * If @hash is not %NULL addresses that fail to allocate an exact filter
6825 * are hashed and update the hash filter bitmap pointed at by @hash.
6827 * Returns a negative error number or the number of filters allocated.
6829 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
6830 unsigned int viid, bool free, unsigned int naddr,
6831 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
6833 int offset, ret = 0;
6834 struct fw_vi_mac_cmd c;
6835 unsigned int nfilters = 0;
6836 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
6837 unsigned int rem = naddr;
6839 if (naddr > max_naddr)
6842 for (offset = 0; offset < naddr ; /**/) {
6843 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
6845 : ARRAY_SIZE(c.u.exact));
6846 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6847 u.exact[fw_naddr]), 16);
6848 struct fw_vi_mac_exact *p;
6851 memset(&c, 0, sizeof(c));
6852 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6855 V_FW_CMD_EXEC(free) |
6856 V_FW_VI_MAC_CMD_VIID(viid));
6857 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
6858 V_FW_CMD_LEN16(len16));
6860 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6862 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
6863 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
6864 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
6868 * It's okay if we run out of space in our MAC address arena.
6869 * Some of the addresses we submit may get stored so we need
6870 * to run through the reply to see what the results were ...
6872 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6873 if (ret && ret != -FW_ENOMEM)
6876 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6877 u16 index = G_FW_VI_MAC_CMD_IDX(
6878 be16_to_cpu(p->valid_to_idx));
6881 idx[offset+i] = (index >= max_naddr
6884 if (index < max_naddr)
6887 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
6895 if (ret == 0 || ret == -FW_ENOMEM)
6901 * t4_change_mac - modifies the exact-match filter for a MAC address
6902 * @adap: the adapter
6903 * @mbox: mailbox to use for the FW command
6905 * @idx: index of existing filter for old value of MAC address, or -1
6906 * @addr: the new MAC address value
6907 * @persist: whether a new MAC allocation should be persistent
6908 * @add_smt: if true also add the address to the HW SMT
6910 * Modifies an exact-match filter and sets it to the new MAC address if
6911 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
6912 * latter case the address is added persistently if @persist is %true.
6914 * Note that in general it is not possible to modify the value of a given
6915 * filter so the generic way to modify an address filter is to free the one
6916 * being used by the old address value and allocate a new filter for the
6917 * new address value.
6919 * Returns a negative error number or the index of the filter with the new
6920 * MAC value. Note that this index may differ from @idx.
6922 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
6923 int idx, const u8 *addr, bool persist, bool add_smt)
6926 struct fw_vi_mac_cmd c;
6927 struct fw_vi_mac_exact *p = c.u.exact;
6928 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
6930 if (idx < 0) /* new allocation */
6931 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
6932 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
6934 memset(&c, 0, sizeof(c));
6935 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6936 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6937 V_FW_VI_MAC_CMD_VIID(viid));
6938 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
6939 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
6940 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
6941 V_FW_VI_MAC_CMD_IDX(idx));
6942 memcpy(p->macaddr, addr, sizeof(p->macaddr));
6944 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6946 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
6947 if (ret >= max_mac_addr)
6954 * t4_set_addr_hash - program the MAC inexact-match hash filter
6955 * @adap: the adapter
6956 * @mbox: mailbox to use for the FW command
6958 * @ucast: whether the hash filter should also match unicast addresses
6959 * @vec: the value to be written to the hash filter
6960 * @sleep_ok: call is allowed to sleep
6962 * Sets the 64-bit inexact-match hash filter for a virtual interface.
6964 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
6965 bool ucast, u64 vec, bool sleep_ok)
6967 struct fw_vi_mac_cmd c;
6970 memset(&c, 0, sizeof(c));
6971 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6972 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6973 V_FW_VI_ENABLE_CMD_VIID(viid));
6974 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
6975 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
6976 c.freemacs_to_len16 = cpu_to_be32(val);
6977 c.u.hash.hashvec = cpu_to_be64(vec);
6978 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6982 * t4_enable_vi_params - enable/disable a virtual interface
6983 * @adap: the adapter
6984 * @mbox: mailbox to use for the FW command
6986 * @rx_en: 1=enable Rx, 0=disable Rx
6987 * @tx_en: 1=enable Tx, 0=disable Tx
6988 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
6990 * Enables/disables a virtual interface. Note that setting DCB Enable
6991 * only makes sense when enabling a Virtual Interface ...
6993 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
6994 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
6996 struct fw_vi_enable_cmd c;
6998 memset(&c, 0, sizeof(c));
6999 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7000 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7001 V_FW_VI_ENABLE_CMD_VIID(viid));
7002 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7003 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7004 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7006 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7010 * t4_enable_vi - enable/disable a virtual interface
7011 * @adap: the adapter
7012 * @mbox: mailbox to use for the FW command
7014 * @rx_en: 1=enable Rx, 0=disable Rx
7015 * @tx_en: 1=enable Tx, 0=disable Tx
7017 * Enables/disables a virtual interface. Note that setting DCB Enable
7018 * only makes sense when enabling a Virtual Interface ...
7020 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7021 bool rx_en, bool tx_en)
7023 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7027 * t4_identify_port - identify a VI's port by blinking its LED
7028 * @adap: the adapter
7029 * @mbox: mailbox to use for the FW command
7031 * @nblinks: how many times to blink LED at 2.5 Hz
7033 * Identifies a VI's port by blinking its LED.
7035 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7036 unsigned int nblinks)
7038 struct fw_vi_enable_cmd c;
7040 memset(&c, 0, sizeof(c));
7041 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7042 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7043 V_FW_VI_ENABLE_CMD_VIID(viid));
7044 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7045 c.blinkdur = cpu_to_be16(nblinks);
7046 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7050 * t4_iq_stop - stop an ingress queue and its FLs
7051 * @adap: the adapter
7052 * @mbox: mailbox to use for the FW command
7053 * @pf: the PF owning the queues
7054 * @vf: the VF owning the queues
7055 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7056 * @iqid: ingress queue id
7057 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7058 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7060 * Stops an ingress queue and its associated FLs, if any. This causes
7061 * any current or future data/messages destined for these queues to be
7064 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7065 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7066 unsigned int fl0id, unsigned int fl1id)
7070 memset(&c, 0, sizeof(c));
7071 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7072 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7073 V_FW_IQ_CMD_VFN(vf));
7074 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7075 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7076 c.iqid = cpu_to_be16(iqid);
7077 c.fl0id = cpu_to_be16(fl0id);
7078 c.fl1id = cpu_to_be16(fl1id);
7079 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7083 * t4_iq_free - free an ingress queue and its FLs
7084 * @adap: the adapter
7085 * @mbox: mailbox to use for the FW command
7086 * @pf: the PF owning the queues
7087 * @vf: the VF owning the queues
7088 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7089 * @iqid: ingress queue id
7090 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7091 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7093 * Frees an ingress queue and its associated FLs, if any.
7095 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7096 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7097 unsigned int fl0id, unsigned int fl1id)
7101 memset(&c, 0, sizeof(c));
7102 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7103 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7104 V_FW_IQ_CMD_VFN(vf));
7105 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7106 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7107 c.iqid = cpu_to_be16(iqid);
7108 c.fl0id = cpu_to_be16(fl0id);
7109 c.fl1id = cpu_to_be16(fl1id);
7110 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7114 * t4_eth_eq_free - free an Ethernet egress queue
7115 * @adap: the adapter
7116 * @mbox: mailbox to use for the FW command
7117 * @pf: the PF owning the queue
7118 * @vf: the VF owning the queue
7119 * @eqid: egress queue id
7121 * Frees an Ethernet egress queue.
7123 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7124 unsigned int vf, unsigned int eqid)
7126 struct fw_eq_eth_cmd c;
7128 memset(&c, 0, sizeof(c));
7129 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7130 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7131 V_FW_EQ_ETH_CMD_PFN(pf) |
7132 V_FW_EQ_ETH_CMD_VFN(vf));
7133 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7134 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7135 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7139 * t4_ctrl_eq_free - free a control egress queue
7140 * @adap: the adapter
7141 * @mbox: mailbox to use for the FW command
7142 * @pf: the PF owning the queue
7143 * @vf: the VF owning the queue
7144 * @eqid: egress queue id
7146 * Frees a control egress queue.
7148 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7149 unsigned int vf, unsigned int eqid)
7151 struct fw_eq_ctrl_cmd c;
7153 memset(&c, 0, sizeof(c));
7154 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7155 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7156 V_FW_EQ_CTRL_CMD_PFN(pf) |
7157 V_FW_EQ_CTRL_CMD_VFN(vf));
7158 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7159 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7160 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7164 * t4_ofld_eq_free - free an offload egress queue
7165 * @adap: the adapter
7166 * @mbox: mailbox to use for the FW command
7167 * @pf: the PF owning the queue
7168 * @vf: the VF owning the queue
7169 * @eqid: egress queue id
7171 * Frees a control egress queue.
7173 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7174 unsigned int vf, unsigned int eqid)
7176 struct fw_eq_ofld_cmd c;
7178 memset(&c, 0, sizeof(c));
7179 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7180 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7181 V_FW_EQ_OFLD_CMD_PFN(pf) |
7182 V_FW_EQ_OFLD_CMD_VFN(vf));
7183 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7184 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7185 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7189 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7190 * @link_down_rc: Link Down Reason Code
7192 * Returns a string representation of the Link Down Reason Code.
7194 const char *t4_link_down_rc_str(unsigned char link_down_rc)
7196 static const char *reason[] = {
7199 "Auto-negotiation Failure",
7201 "Insufficient Airflow",
7202 "Unable To Determine Reason",
7203 "No RX Signal Detected",
7207 if (link_down_rc >= ARRAY_SIZE(reason))
7208 return "Bad Reason Code";
7210 return reason[link_down_rc];
7214 * t4_handle_fw_rpl - process a FW reply message
7215 * @adap: the adapter
7216 * @rpl: start of the FW message
7218 * Processes a FW message, such as link state change messages.
7220 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7222 u8 opcode = *(const u8 *)rpl;
7223 const struct fw_port_cmd *p = (const void *)rpl;
7224 unsigned int action =
7225 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7227 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7228 /* link/module state change message */
7229 int speed = 0, fc = 0, i;
7230 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7231 struct port_info *pi = NULL;
7232 struct link_config *lc;
7233 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7234 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7235 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
7237 if (stat & F_FW_PORT_CMD_RXPAUSE)
7239 if (stat & F_FW_PORT_CMD_TXPAUSE)
7241 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7243 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7245 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7247 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7250 for_each_port(adap, i) {
7251 pi = adap2pinfo(adap, i);
7252 if (pi->tx_chan == chan)
7257 if (mod != pi->mod_type) {
7259 t4_os_portmod_changed(adap, i);
7261 if (link_ok != lc->link_ok || speed != lc->speed ||
7262 fc != lc->fc) { /* something changed */
7265 if (!link_ok && lc->link_ok)
7266 reason = G_FW_PORT_CMD_LINKDNRC(stat);
7270 lc->link_ok = link_ok;
7273 lc->supported = be16_to_cpu(p->u.info.pcap);
7274 t4_os_link_changed(adap, i, link_ok, reason);
7277 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7284 * get_pci_mode - determine a card's PCI mode
7285 * @adapter: the adapter
7286 * @p: where to store the PCI settings
7288 * Determines a card's PCI mode and associated parameters, such as speed
7291 static void get_pci_mode(struct adapter *adapter,
7292 struct pci_params *p)
7297 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7299 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7300 p->speed = val & PCI_EXP_LNKSTA_CLS;
7301 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7306 * init_link_config - initialize a link's SW state
7307 * @lc: structure holding the link state
7308 * @caps: link capabilities
7310 * Initializes the SW state maintained for each link, including the link's
7311 * capabilities and default speed/flow-control/autonegotiation settings.
7313 static void init_link_config(struct link_config *lc, unsigned int caps)
7315 lc->supported = caps;
7316 lc->requested_speed = 0;
7318 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7319 if (lc->supported & FW_PORT_CAP_ANEG) {
7320 lc->advertising = lc->supported & ADVERT_MASK;
7321 lc->autoneg = AUTONEG_ENABLE;
7322 lc->requested_fc |= PAUSE_AUTONEG;
7324 lc->advertising = 0;
7325 lc->autoneg = AUTONEG_DISABLE;
7330 u32 vendor_and_model_id;
7334 int t4_get_flash_params(struct adapter *adapter)
7337 * Table for non-Numonix supported flash parts. Numonix parts are left
7338 * to the preexisting well-tested code. All flash parts have 64KB
7341 static struct flash_desc supported_flash[] = {
7342 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7348 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7350 ret = sf1_read(adapter, 3, 0, 1, &info);
7351 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7355 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7356 if (supported_flash[ret].vendor_and_model_id == info) {
7357 adapter->params.sf_size = supported_flash[ret].size_mb;
7358 adapter->params.sf_nsec =
7359 adapter->params.sf_size / SF_SEC_SIZE;
7363 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7365 info >>= 16; /* log2 of size */
7366 if (info >= 0x14 && info < 0x18)
7367 adapter->params.sf_nsec = 1 << (info - 16);
7368 else if (info == 0x18)
7369 adapter->params.sf_nsec = 64;
7372 adapter->params.sf_size = 1 << info;
7375 * We should ~probably~ reject adapters with FLASHes which are too
7376 * small but we have some legacy FPGAs with small FLASHes that we'd
7377 * still like to use. So instead we emit a scary message ...
7379 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7380 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7381 adapter->params.sf_size, FLASH_MIN_SIZE);
7386 static void set_pcie_completion_timeout(struct adapter *adapter,
7392 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7394 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7397 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7401 static const struct chip_params *get_chip_params(int chipid)
7403 static const struct chip_params chip_params[] = {
7407 .pm_stats_cnt = PM_NSTATS,
7408 .cng_ch_bits_log = 2,
7410 .cim_num_obq = CIM_NUM_OBQ,
7411 .mps_rplc_size = 128,
7413 .sge_fl_db = F_DBPRIO,
7414 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7419 .pm_stats_cnt = PM_NSTATS,
7420 .cng_ch_bits_log = 2,
7422 .cim_num_obq = CIM_NUM_OBQ_T5,
7423 .mps_rplc_size = 128,
7425 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7426 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7431 .pm_stats_cnt = T6_PM_NSTATS,
7432 .cng_ch_bits_log = 3,
7434 .cim_num_obq = CIM_NUM_OBQ_T5,
7435 .mps_rplc_size = 256,
7438 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7442 chipid -= CHELSIO_T4;
7443 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7446 return &chip_params[chipid];
7450 * t4_prep_adapter - prepare SW and HW for operation
7451 * @adapter: the adapter
7452 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7454 * Initialize adapter SW state for the various HW modules, set initial
7455 * values for some adapter tunables, take PHYs out of reset, and
7456 * initialize the MDIO interface.
7458 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7464 get_pci_mode(adapter, &adapter->params.pci);
7466 pl_rev = t4_read_reg(adapter, A_PL_REV);
7467 adapter->params.chipid = G_CHIPID(pl_rev);
7468 adapter->params.rev = G_REV(pl_rev);
7469 if (adapter->params.chipid == 0) {
7470 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7471 adapter->params.chipid = CHELSIO_T4;
7473 /* T4A1 chip is not supported */
7474 if (adapter->params.rev == 1) {
7475 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7480 adapter->chip_params = get_chip_params(chip_id(adapter));
7481 if (adapter->chip_params == NULL)
7484 adapter->params.pci.vpd_cap_addr =
7485 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7487 ret = t4_get_flash_params(adapter);
7491 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7495 /* Cards with real ASICs have the chipid in the PCIe device id */
7496 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7497 if (device_id >> 12 == chip_id(adapter))
7498 adapter->params.cim_la_size = CIMLA_SIZE;
7501 adapter->params.fpga = 1;
7502 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7505 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7508 * Default port and clock for debugging in case we can't reach FW.
7510 adapter->params.nports = 1;
7511 adapter->params.portvec = 1;
7512 adapter->params.vpd.cclk = 50000;
7514 /* Set pci completion timeout value to 4 seconds. */
7515 set_pcie_completion_timeout(adapter, 0xd);
7520 * t4_shutdown_adapter - shut down adapter, host & wire
7521 * @adapter: the adapter
7523 * Perform an emergency shutdown of the adapter and stop it from
7524 * continuing any further communication on the ports or DMA to the
7525 * host. This is typically used when the adapter and/or firmware
7526 * have crashed and we want to prevent any further accidental
7527 * communication with the rest of the world. This will also force
7528 * the port Link Status to go down -- if register writes work --
7529 * which should help our peers figure out that we're down.
7531 int t4_shutdown_adapter(struct adapter *adapter)
7535 t4_intr_disable(adapter);
7536 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
7537 for_each_port(adapter, port) {
7538 u32 a_port_cfg = PORT_REG(port,
7543 t4_write_reg(adapter, a_port_cfg,
7544 t4_read_reg(adapter, a_port_cfg)
7545 & ~V_SIGNAL_DET(1));
7547 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7553 * t4_init_devlog_params - initialize adapter->params.devlog
7554 * @adap: the adapter
7555 * @fw_attach: whether we can talk to the firmware
7557 * Initialize various fields of the adapter's Firmware Device Log
7558 * Parameters structure.
7560 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7562 struct devlog_params *dparams = &adap->params.devlog;
7564 unsigned int devlog_meminfo;
7565 struct fw_devlog_cmd devlog_cmd;
7568 /* If we're dealing with newer firmware, the Device Log Paramerters
7569 * are stored in a designated register which allows us to access the
7570 * Device Log even if we can't talk to the firmware.
7573 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7575 unsigned int nentries, nentries128;
7577 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
7578 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
7580 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7581 nentries = (nentries128 + 1) * 128;
7582 dparams->size = nentries * sizeof(struct fw_devlog_e);
7588 * For any failing returns ...
7590 memset(dparams, 0, sizeof *dparams);
7593 * If we can't talk to the firmware, there's really nothing we can do
7599 /* Otherwise, ask the firmware for it's Device Log Parameters.
7601 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7602 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7603 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7604 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7605 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7611 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7612 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7613 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7614 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7620 * t4_init_sge_params - initialize adap->params.sge
7621 * @adapter: the adapter
7623 * Initialize various fields of the adapter's SGE Parameters structure.
7625 int t4_init_sge_params(struct adapter *adapter)
7628 struct sge_params *sp = &adapter->params.sge;
7630 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7631 sp->counter_val[0] = G_THRESHOLD_0(r);
7632 sp->counter_val[1] = G_THRESHOLD_1(r);
7633 sp->counter_val[2] = G_THRESHOLD_2(r);
7634 sp->counter_val[3] = G_THRESHOLD_3(r);
7636 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7637 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7638 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7639 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7640 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7641 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7642 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7643 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7644 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
7646 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7647 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7649 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7651 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7653 /* egress queues: log2 of # of doorbells per BAR2 page */
7654 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7655 r >>= S_QUEUESPERPAGEPF0 +
7656 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7657 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7659 /* ingress queues: log2 of # of doorbells per BAR2 page */
7660 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7661 r >>= S_QUEUESPERPAGEPF0 +
7662 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7663 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
7665 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7666 r >>= S_HOSTPAGESIZEPF0 +
7667 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7668 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7670 r = t4_read_reg(adapter, A_SGE_CONTROL);
7671 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7672 sp->fl_pktshift = G_PKTSHIFT(r);
7673 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
7675 sp->pack_boundary = sp->pad_boundary;
7677 r = t4_read_reg(adapter, A_SGE_CONTROL2);
7678 if (G_INGPACKBOUNDARY(r) == 0)
7679 sp->pack_boundary = 16;
7681 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
7688 * Read and cache the adapter's compressed filter mode and ingress config.
7690 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7692 struct tp_params *tpp = &adap->params.tp;
7694 if (t4_use_ldst(adap)) {
7695 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7696 A_TP_VLAN_PRI_MAP, 1);
7697 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7698 A_TP_INGRESS_CONFIG, 1);
7700 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7701 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7702 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7703 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7707 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7708 * shift positions of several elements of the Compressed Filter Tuple
7709 * for this adapter which we need frequently ...
7711 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
7712 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
7713 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
7714 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
7715 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
7716 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
7717 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
7718 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
7719 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
7720 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
7723 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7724 * represents the presense of an Outer VLAN instead of a VNIC ID.
7726 if ((tpp->ingress_config & F_VNIC) == 0)
7727 tpp->vnic_shift = -1;
7731 * t4_init_tp_params - initialize adap->params.tp
7732 * @adap: the adapter
7734 * Initialize various fields of the adapter's TP Parameters structure.
7736 int t4_init_tp_params(struct adapter *adap)
7740 struct tp_params *tpp = &adap->params.tp;
7742 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7743 tpp->tre = G_TIMERRESOLUTION(v);
7744 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7746 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7747 for (chan = 0; chan < MAX_NCHAN; chan++)
7748 tpp->tx_modq[chan] = chan;
7750 read_filter_mode_and_ingress_config(adap);
7753 * For T6, cache the adapter's compressed error vector
7754 * and passing outer header info for encapsulated packets.
7756 if (chip_id(adap) > CHELSIO_T5) {
7757 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
7758 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
7765 * t4_filter_field_shift - calculate filter field shift
7766 * @adap: the adapter
7767 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7769 * Return the shift position of a filter field within the Compressed
7770 * Filter Tuple. The filter field is specified via its selection bit
7771 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
7773 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7775 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7779 if ((filter_mode & filter_sel) == 0)
7782 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
7783 switch (filter_mode & sel) {
7785 field_shift += W_FT_FCOE;
7788 field_shift += W_FT_PORT;
7791 field_shift += W_FT_VNIC_ID;
7794 field_shift += W_FT_VLAN;
7797 field_shift += W_FT_TOS;
7800 field_shift += W_FT_PROTOCOL;
7803 field_shift += W_FT_ETHERTYPE;
7806 field_shift += W_FT_MACMATCH;
7809 field_shift += W_FT_MPSHITTYPE;
7811 case F_FRAGMENTATION:
7812 field_shift += W_FT_FRAGMENTATION;
7819 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
7823 struct fw_port_cmd c;
7825 struct port_info *p = adap2pinfo(adap, port_id);
7828 memset(&c, 0, sizeof(c));
7830 for (i = 0, j = -1; i <= p->port_id; i++) {
7833 } while ((adap->params.portvec & (1 << j)) == 0);
7836 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
7837 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7838 V_FW_PORT_CMD_PORTID(j));
7839 c.action_to_len16 = htonl(
7840 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7842 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7846 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
7850 p->vi[0].viid = ret;
7852 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
7854 p->vi[0].rss_size = rss_size;
7855 t4_os_set_hw_addr(adap, p->port_id, addr);
7857 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
7858 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
7859 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
7860 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
7861 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
7863 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
7865 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7866 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
7867 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
7868 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
7870 p->vi[0].rss_base = 0xffff;
7872 /* MPASS((val >> 16) == rss_size); */
7873 p->vi[0].rss_base = val & 0xffff;
7880 * t4_read_cimq_cfg - read CIM queue configuration
7881 * @adap: the adapter
7882 * @base: holds the queue base addresses in bytes
7883 * @size: holds the queue sizes in bytes
7884 * @thres: holds the queue full thresholds in bytes
7886 * Returns the current configuration of the CIM queues, starting with
7887 * the IBQs, then the OBQs.
7889 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
7892 int cim_num_obq = adap->chip_params->cim_num_obq;
7894 for (i = 0; i < CIM_NUM_IBQ; i++) {
7895 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
7897 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7898 /* value is in 256-byte units */
7899 *base++ = G_CIMQBASE(v) * 256;
7900 *size++ = G_CIMQSIZE(v) * 256;
7901 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
7903 for (i = 0; i < cim_num_obq; i++) {
7904 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7906 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7907 /* value is in 256-byte units */
7908 *base++ = G_CIMQBASE(v) * 256;
7909 *size++ = G_CIMQSIZE(v) * 256;
7914 * t4_read_cim_ibq - read the contents of a CIM inbound queue
7915 * @adap: the adapter
7916 * @qid: the queue index
7917 * @data: where to store the queue contents
7918 * @n: capacity of @data in 32-bit words
7920 * Reads the contents of the selected CIM queue starting at address 0 up
7921 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7922 * error and the number of 32-bit words actually read on success.
7924 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7926 int i, err, attempts;
7928 const unsigned int nwords = CIM_IBQ_SIZE * 4;
7930 if (qid > 5 || (n & 3))
7933 addr = qid * nwords;
7937 /* It might take 3-10ms before the IBQ debug read access is allowed.
7938 * Wait for 1 Sec with a delay of 1 usec.
7942 for (i = 0; i < n; i++, addr++) {
7943 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
7945 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
7949 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
7951 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
7956 * t4_read_cim_obq - read the contents of a CIM outbound queue
7957 * @adap: the adapter
7958 * @qid: the queue index
7959 * @data: where to store the queue contents
7960 * @n: capacity of @data in 32-bit words
7962 * Reads the contents of the selected CIM queue starting at address 0 up
7963 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7964 * error and the number of 32-bit words actually read on success.
7966 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7969 unsigned int addr, v, nwords;
7970 int cim_num_obq = adap->chip_params->cim_num_obq;
7972 if ((qid > (cim_num_obq - 1)) || (n & 3))
7975 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7976 V_QUENUMSELECT(qid));
7977 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7979 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
7980 nwords = G_CIMQSIZE(v) * 64; /* same */
7984 for (i = 0; i < n; i++, addr++) {
7985 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
7987 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
7991 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
7993 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
7999 CIM_CTL_BASE = 0x2000,
8000 CIM_PBT_ADDR_BASE = 0x2800,
8001 CIM_PBT_LRF_BASE = 0x3000,
8002 CIM_PBT_DATA_BASE = 0x3800
8006 * t4_cim_read - read a block from CIM internal address space
8007 * @adap: the adapter
8008 * @addr: the start address within the CIM address space
8009 * @n: number of words to read
8010 * @valp: where to store the result
8012 * Reads a block of 4-byte words from the CIM intenal address space.
8014 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8019 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8022 for ( ; !ret && n--; addr += 4) {
8023 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8024 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8027 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8033 * t4_cim_write - write a block into CIM internal address space
8034 * @adap: the adapter
8035 * @addr: the start address within the CIM address space
8036 * @n: number of words to write
8037 * @valp: set of values to write
8039 * Writes a block of 4-byte words into the CIM intenal address space.
8041 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8042 const unsigned int *valp)
8046 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8049 for ( ; !ret && n--; addr += 4) {
8050 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8051 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8052 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8058 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8061 return t4_cim_write(adap, addr, 1, &val);
8065 * t4_cim_ctl_read - read a block from CIM control region
8066 * @adap: the adapter
8067 * @addr: the start address within the CIM control region
8068 * @n: number of words to read
8069 * @valp: where to store the result
8071 * Reads a block of 4-byte words from the CIM control region.
8073 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8076 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8080 * t4_cim_read_la - read CIM LA capture buffer
8081 * @adap: the adapter
8082 * @la_buf: where to store the LA data
8083 * @wrptr: the HW write pointer within the capture buffer
8085 * Reads the contents of the CIM LA buffer with the most recent entry at
8086 * the end of the returned data and with the entry at @wrptr first.
8087 * We try to leave the LA in the running state we find it in.
8089 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8092 unsigned int cfg, val, idx;
8094 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8098 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8099 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8104 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8108 idx = G_UPDBGLAWRPTR(val);
8112 for (i = 0; i < adap->params.cim_la_size; i++) {
8113 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8114 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8117 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8120 if (val & F_UPDBGLARDEN) {
8124 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8128 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8129 idx = (idx + 1) & M_UPDBGLARDPTR;
8131 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8132 * identify the 32-bit portion of the full 312-bit data
8135 while ((idx & 0xf) > 9)
8136 idx = (idx + 1) % M_UPDBGLARDPTR;
8139 if (cfg & F_UPDBGLAEN) {
8140 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8141 cfg & ~F_UPDBGLARDEN);
8149 * t4_tp_read_la - read TP LA capture buffer
8150 * @adap: the adapter
8151 * @la_buf: where to store the LA data
8152 * @wrptr: the HW write pointer within the capture buffer
8154 * Reads the contents of the TP LA buffer with the most recent entry at
8155 * the end of the returned data and with the entry at @wrptr first.
8156 * We leave the LA in the running state we find it in.
8158 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8160 bool last_incomplete;
8161 unsigned int i, cfg, val, idx;
8163 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8164 if (cfg & F_DBGLAENABLE) /* freeze LA */
8165 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8166 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8168 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8169 idx = G_DBGLAWPTR(val);
8170 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8171 if (last_incomplete)
8172 idx = (idx + 1) & M_DBGLARPTR;
8177 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8178 val |= adap->params.tp.la_mask;
8180 for (i = 0; i < TPLA_SIZE; i++) {
8181 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8182 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8183 idx = (idx + 1) & M_DBGLARPTR;
8186 /* Wipe out last entry if it isn't valid */
8187 if (last_incomplete)
8188 la_buf[TPLA_SIZE - 1] = ~0ULL;
8190 if (cfg & F_DBGLAENABLE) /* restore running state */
8191 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8192 cfg | adap->params.tp.la_mask);
8196 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8197 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8198 * state for more than the Warning Threshold then we'll issue a warning about
8199 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8200 * appears to be hung every Warning Repeat second till the situation clears.
8201 * If the situation clears, we'll note that as well.
8203 #define SGE_IDMA_WARN_THRESH 1
8204 #define SGE_IDMA_WARN_REPEAT 300
8207 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8208 * @adapter: the adapter
8209 * @idma: the adapter IDMA Monitor state
8211 * Initialize the state of an SGE Ingress DMA Monitor.
8213 void t4_idma_monitor_init(struct adapter *adapter,
8214 struct sge_idma_monitor_state *idma)
8216 /* Initialize the state variables for detecting an SGE Ingress DMA
8217 * hang. The SGE has internal counters which count up on each clock
8218 * tick whenever the SGE finds its Ingress DMA State Engines in the
8219 * same state they were on the previous clock tick. The clock used is
8220 * the Core Clock so we have a limit on the maximum "time" they can
8221 * record; typically a very small number of seconds. For instance,
8222 * with a 600MHz Core Clock, we can only count up to a bit more than
8223 * 7s. So we'll synthesize a larger counter in order to not run the
8224 * risk of having the "timers" overflow and give us the flexibility to
8225 * maintain a Hung SGE State Machine of our own which operates across
8226 * a longer time frame.
8228 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8229 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8233 * t4_idma_monitor - monitor SGE Ingress DMA state
8234 * @adapter: the adapter
8235 * @idma: the adapter IDMA Monitor state
8236 * @hz: number of ticks/second
8237 * @ticks: number of ticks since the last IDMA Monitor call
8239 void t4_idma_monitor(struct adapter *adapter,
8240 struct sge_idma_monitor_state *idma,
8243 int i, idma_same_state_cnt[2];
8245 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8246 * are counters inside the SGE which count up on each clock when the
8247 * SGE finds its Ingress DMA State Engines in the same states they
8248 * were in the previous clock. The counters will peg out at
8249 * 0xffffffff without wrapping around so once they pass the 1s
8250 * threshold they'll stay above that till the IDMA state changes.
8252 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8253 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8254 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8256 for (i = 0; i < 2; i++) {
8257 u32 debug0, debug11;
8259 /* If the Ingress DMA Same State Counter ("timer") is less
8260 * than 1s, then we can reset our synthesized Stall Timer and
8261 * continue. If we have previously emitted warnings about a
8262 * potential stalled Ingress Queue, issue a note indicating
8263 * that the Ingress Queue has resumed forward progress.
8265 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8266 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8267 CH_WARN(adapter, "SGE idma%d, queue %u, "
8268 "resumed after %d seconds\n",
8269 i, idma->idma_qid[i],
8270 idma->idma_stalled[i]/hz);
8271 idma->idma_stalled[i] = 0;
8275 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8276 * domain. The first time we get here it'll be because we
8277 * passed the 1s Threshold; each additional time it'll be
8278 * because the RX Timer Callback is being fired on its regular
8281 * If the stall is below our Potential Hung Ingress Queue
8282 * Warning Threshold, continue.
8284 if (idma->idma_stalled[i] == 0) {
8285 idma->idma_stalled[i] = hz;
8286 idma->idma_warn[i] = 0;
8288 idma->idma_stalled[i] += ticks;
8289 idma->idma_warn[i] -= ticks;
8292 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8295 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8297 if (idma->idma_warn[i] > 0)
8299 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8301 /* Read and save the SGE IDMA State and Queue ID information.
8302 * We do this every time in case it changes across time ...
8303 * can't be too careful ...
8305 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8306 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8307 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8309 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8310 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8311 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8313 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8314 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8315 i, idma->idma_qid[i], idma->idma_state[i],
8316 idma->idma_stalled[i]/hz,
8318 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8323 * t4_read_pace_tbl - read the pace table
8324 * @adap: the adapter
8325 * @pace_vals: holds the returned values
8327 * Returns the values of TP's pace table in microseconds.
8329 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8333 for (i = 0; i < NTX_SCHED; i++) {
8334 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8335 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8336 pace_vals[i] = dack_ticks_to_usec(adap, v);
8341 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8342 * @adap: the adapter
8343 * @sched: the scheduler index
8344 * @kbps: the byte rate in Kbps
8345 * @ipg: the interpacket delay in tenths of nanoseconds
8347 * Return the current configuration of a HW Tx scheduler.
8349 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8352 unsigned int v, addr, bpt, cpt;
8355 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8356 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8357 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8360 bpt = (v >> 8) & 0xff;
8363 *kbps = 0; /* scheduler disabled */
8365 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8366 *kbps = (v * bpt) / 125;
8370 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8371 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8372 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8376 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8381 * t4_load_cfg - download config file
8382 * @adap: the adapter
8383 * @cfg_data: the cfg text file to write
8384 * @size: text file size
8386 * Write the supplied config text file to the card's serial flash.
8388 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8390 int ret, i, n, cfg_addr;
8392 unsigned int flash_cfg_start_sec;
8393 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8395 cfg_addr = t4_flash_cfg_addr(adap);
8400 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8402 if (size > FLASH_CFG_MAX_SIZE) {
8403 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8404 FLASH_CFG_MAX_SIZE);
8408 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8410 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8411 flash_cfg_start_sec + i - 1);
8413 * If size == 0 then we're simply erasing the FLASH sectors associated
8414 * with the on-adapter Firmware Configuration File.
8416 if (ret || size == 0)
8419 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8420 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8421 if ( (size - i) < SF_PAGE_SIZE)
8425 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8429 addr += SF_PAGE_SIZE;
8430 cfg_data += SF_PAGE_SIZE;
8435 CH_ERR(adap, "config file %s failed %d\n",
8436 (size == 0 ? "clear" : "download"), ret);
8441 * t5_fw_init_extern_mem - initialize the external memory
8442 * @adap: the adapter
8444 * Initializes the external memory on T5.
8446 int t5_fw_init_extern_mem(struct adapter *adap)
8448 u32 params[1], val[1];
8454 val[0] = 0xff; /* Initialize all MCs */
8455 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8456 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8457 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8458 FW_CMD_MAX_TIMEOUT);
8463 /* BIOS boot headers */
8464 typedef struct pci_expansion_rom_header {
8465 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8466 u8 reserved[22]; /* Reserved per processor Architecture data */
8467 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8468 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8470 /* Legacy PCI Expansion ROM Header */
8471 typedef struct legacy_pci_expansion_rom_header {
8472 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8473 u8 size512; /* Current Image Size in units of 512 bytes */
8474 u8 initentry_point[4];
8475 u8 cksum; /* Checksum computed on the entire Image */
8476 u8 reserved[16]; /* Reserved */
8477 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
8478 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8480 /* EFI PCI Expansion ROM Header */
8481 typedef struct efi_pci_expansion_rom_header {
8482 u8 signature[2]; // ROM signature. The value 0xaa55
8483 u8 initialization_size[2]; /* Units 512. Includes this header */
8484 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8485 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
8486 u8 efi_machine_type[2]; /* Machine type from EFI image header */
8487 u8 compression_type[2]; /* Compression type. */
8489 * Compression type definition
8492 * 0x2-0xFFFF: Reserved
8494 u8 reserved[8]; /* Reserved */
8495 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
8496 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8497 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8499 /* PCI Data Structure Format */
8500 typedef struct pcir_data_structure { /* PCI Data Structure */
8501 u8 signature[4]; /* Signature. The string "PCIR" */
8502 u8 vendor_id[2]; /* Vendor Identification */
8503 u8 device_id[2]; /* Device Identification */
8504 u8 vital_product[2]; /* Pointer to Vital Product Data */
8505 u8 length[2]; /* PCIR Data Structure Length */
8506 u8 revision; /* PCIR Data Structure Revision */
8507 u8 class_code[3]; /* Class Code */
8508 u8 image_length[2]; /* Image Length. Multiple of 512B */
8509 u8 code_revision[2]; /* Revision Level of Code/Data */
8510 u8 code_type; /* Code Type. */
8512 * PCI Expansion ROM Code Types
8513 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8514 * 0x01: Open Firmware standard for PCI. FCODE
8515 * 0x02: Hewlett-Packard PA RISC. HP reserved
8516 * 0x03: EFI Image. EFI
8517 * 0x04-0xFF: Reserved.
8519 u8 indicator; /* Indicator. Identifies the last image in the ROM */
8520 u8 reserved[2]; /* Reserved */
8521 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8523 /* BOOT constants */
8525 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8526 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8527 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8528 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8529 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8530 VENDOR_ID = 0x1425, /* Vendor ID */
8531 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8535 * modify_device_id - Modifies the device ID of the Boot BIOS image
8536 * @adatper: the device ID to write.
8537 * @boot_data: the boot image to modify.
8539 * Write the supplied device ID to the boot BIOS image.
8541 static void modify_device_id(int device_id, u8 *boot_data)
8543 legacy_pci_exp_rom_header_t *header;
8544 pcir_data_t *pcir_header;
8548 * Loop through all chained images and change the device ID's
8551 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8552 pcir_header = (pcir_data_t *) &boot_data[cur_header +
8553 le16_to_cpu(*(u16*)header->pcir_offset)];
8556 * Only modify the Device ID if code type is Legacy or HP.
8557 * 0x00: Okay to modify
8558 * 0x01: FCODE. Do not be modify
8559 * 0x03: Okay to modify
8560 * 0x04-0xFF: Do not modify
8562 if (pcir_header->code_type == 0x00) {
8567 * Modify Device ID to match current adatper
8569 *(u16*) pcir_header->device_id = device_id;
8572 * Set checksum temporarily to 0.
8573 * We will recalculate it later.
8575 header->cksum = 0x0;
8578 * Calculate and update checksum
8580 for (i = 0; i < (header->size512 * 512); i++)
8581 csum += (u8)boot_data[cur_header + i];
8584 * Invert summed value to create the checksum
8585 * Writing new checksum value directly to the boot data
8587 boot_data[cur_header + 7] = -csum;
8589 } else if (pcir_header->code_type == 0x03) {
8592 * Modify Device ID to match current adatper
8594 *(u16*) pcir_header->device_id = device_id;
8600 * Check indicator element to identify if this is the last
8603 if (pcir_header->indicator & 0x80)
8607 * Move header pointer up to the next image in the ROM.
8609 cur_header += header->size512 * 512;
8614 * t4_load_boot - download boot flash
8615 * @adapter: the adapter
8616 * @boot_data: the boot image to write
8617 * @boot_addr: offset in flash to write boot_data
8620 * Write the supplied boot image to the card's serial flash.
8621 * The boot image has the following sections: a 28-byte header and the
8624 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8625 unsigned int boot_addr, unsigned int size)
8627 pci_exp_rom_header_t *header;
8629 pcir_data_t *pcir_header;
8633 unsigned int boot_sector = (boot_addr * 1024 );
8634 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8637 * Make sure the boot image does not encroach on the firmware region
8639 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8640 CH_ERR(adap, "boot image encroaching on firmware region\n");
8645 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8646 * and Boot configuration data sections. These 3 boot sections span
8647 * sectors 0 to 7 in flash and live right before the FW image location.
8649 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8651 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8652 (boot_sector >> 16) + i - 1);
8655 * If size == 0 then we're simply erasing the FLASH sectors associated
8656 * with the on-adapter option ROM file
8658 if (ret || (size == 0))
8661 /* Get boot header */
8662 header = (pci_exp_rom_header_t *)boot_data;
8663 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8664 /* PCIR Data Structure */
8665 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8668 * Perform some primitive sanity testing to avoid accidentally
8669 * writing garbage over the boot sectors. We ought to check for
8670 * more but it's not worth it for now ...
8672 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8673 CH_ERR(adap, "boot image too small/large\n");
8677 #ifndef CHELSIO_T4_DIAGS
8679 * Check BOOT ROM header signature
8681 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8682 CH_ERR(adap, "Boot image missing signature\n");
8687 * Check PCI header signature
8689 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8690 CH_ERR(adap, "PCI header missing signature\n");
8695 * Check Vendor ID matches Chelsio ID
8697 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8698 CH_ERR(adap, "Vendor ID missing signature\n");
8704 * Retrieve adapter's device ID
8706 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8707 /* Want to deal with PF 0 so I strip off PF 4 indicator */
8708 device_id = device_id & 0xf0ff;
8711 * Check PCIE Device ID
8713 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8715 * Change the device ID in the Boot BIOS image to match
8716 * the Device ID of the current adapter.
8718 modify_device_id(device_id, boot_data);
8722 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8723 * we finish copying the rest of the boot image. This will ensure
8724 * that the BIOS boot header will only be written if the boot image
8725 * was written in full.
8728 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8729 addr += SF_PAGE_SIZE;
8730 boot_data += SF_PAGE_SIZE;
8731 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
8736 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8737 (const u8 *)header, 0);
8741 CH_ERR(adap, "boot image download failed, error %d\n", ret);
8746 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
8747 * @adapter: the adapter
8749 * Return the address within the flash where the OptionROM Configuration
8750 * is stored, or an error if the device FLASH is too small to contain
8751 * a OptionROM Configuration.
8753 static int t4_flash_bootcfg_addr(struct adapter *adapter)
8756 * If the device FLASH isn't large enough to hold a Firmware
8757 * Configuration File, return an error.
8759 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
8762 return FLASH_BOOTCFG_START;
8765 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
8767 int ret, i, n, cfg_addr;
8769 unsigned int flash_cfg_start_sec;
8770 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8772 cfg_addr = t4_flash_bootcfg_addr(adap);
8777 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8779 if (size > FLASH_BOOTCFG_MAX_SIZE) {
8780 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
8781 FLASH_BOOTCFG_MAX_SIZE);
8785 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
8787 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8788 flash_cfg_start_sec + i - 1);
8791 * If size == 0 then we're simply erasing the FLASH sectors associated
8792 * with the on-adapter OptionROM Configuration File.
8794 if (ret || size == 0)
8797 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8798 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8799 if ( (size - i) < SF_PAGE_SIZE)
8803 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
8807 addr += SF_PAGE_SIZE;
8808 cfg_data += SF_PAGE_SIZE;
8813 CH_ERR(adap, "boot config data %s failed %d\n",
8814 (size == 0 ? "clear" : "download"), ret);
8819 * t4_set_filter_mode - configure the optional components of filter tuples
8820 * @adap: the adapter
8821 * @mode_map: a bitmap selcting which optional filter components to enable
8823 * Sets the filter mode by selecting the optional components to enable
8824 * in filter tuples. Returns 0 on success and a negative error if the
8825 * requested mode needs more bits than are available for optional
8828 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
8830 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
8834 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
8835 if (mode_map & (1 << i))
8837 if (nbits > FILTER_OPT_LEN)
8839 if (t4_use_ldst(adap))
8840 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
8842 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
8843 1, A_TP_VLAN_PRI_MAP);
8844 read_filter_mode_and_ingress_config(adap);
8850 * t4_clr_port_stats - clear port statistics
8851 * @adap: the adapter
8852 * @idx: the port index
8854 * Clear HW statistics for the given port.
8856 void t4_clr_port_stats(struct adapter *adap, int idx)
8859 u32 bgmap = t4_get_mps_bg_map(adap, idx);
8863 port_base_addr = PORT_BASE(idx);
8865 port_base_addr = T5_PORT_BASE(idx);
8867 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
8868 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
8869 t4_write_reg(adap, port_base_addr + i, 0);
8870 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
8871 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
8872 t4_write_reg(adap, port_base_addr + i, 0);
8873 for (i = 0; i < 4; i++)
8874 if (bgmap & (1 << i)) {
8876 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
8878 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
8883 * t4_i2c_rd - read I2C data from adapter
8884 * @adap: the adapter
8885 * @port: Port number if per-port device; <0 if not
8886 * @devid: per-port device ID or absolute device ID
8887 * @offset: byte offset into device I2C space
8888 * @len: byte length of I2C space data
8889 * @buf: buffer in which to return I2C data
8891 * Reads the I2C data from the indicated device and location.
8893 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
8894 int port, unsigned int devid,
8895 unsigned int offset, unsigned int len,
8899 struct fw_ldst_cmd ldst;
8905 len > sizeof ldst.u.i2c.data)
8908 memset(&ldst, 0, sizeof ldst);
8909 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8910 ldst.op_to_addrspace =
8911 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8915 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
8916 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8917 ldst.u.i2c.did = devid;
8918 ldst.u.i2c.boffset = offset;
8919 ldst.u.i2c.blen = len;
8920 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8922 memcpy(buf, ldst.u.i2c.data, len);
8927 * t4_i2c_wr - write I2C data to adapter
8928 * @adap: the adapter
8929 * @port: Port number if per-port device; <0 if not
8930 * @devid: per-port device ID or absolute device ID
8931 * @offset: byte offset into device I2C space
8932 * @len: byte length of I2C space data
8933 * @buf: buffer containing new I2C data
8935 * Write the I2C data to the indicated device and location.
8937 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
8938 int port, unsigned int devid,
8939 unsigned int offset, unsigned int len,
8943 struct fw_ldst_cmd ldst;
8948 len > sizeof ldst.u.i2c.data)
8951 memset(&ldst, 0, sizeof ldst);
8952 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8953 ldst.op_to_addrspace =
8954 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8958 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
8959 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8960 ldst.u.i2c.did = devid;
8961 ldst.u.i2c.boffset = offset;
8962 ldst.u.i2c.blen = len;
8963 memcpy(ldst.u.i2c.data, buf, len);
8964 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8968 * t4_sge_ctxt_rd - read an SGE context through FW
8969 * @adap: the adapter
8970 * @mbox: mailbox to use for the FW command
8971 * @cid: the context id
8972 * @ctype: the context type
8973 * @data: where to store the context data
8975 * Issues a FW command through the given mailbox to read an SGE context.
8977 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
8978 enum ctxt_type ctype, u32 *data)
8981 struct fw_ldst_cmd c;
8983 if (ctype == CTXT_EGRESS)
8984 ret = FW_LDST_ADDRSPC_SGE_EGRC;
8985 else if (ctype == CTXT_INGRESS)
8986 ret = FW_LDST_ADDRSPC_SGE_INGC;
8987 else if (ctype == CTXT_FLM)
8988 ret = FW_LDST_ADDRSPC_SGE_FLMC;
8990 ret = FW_LDST_ADDRSPC_SGE_CONMC;
8992 memset(&c, 0, sizeof(c));
8993 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8994 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8995 V_FW_LDST_CMD_ADDRSPACE(ret));
8996 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
8997 c.u.idctxt.physid = cpu_to_be32(cid);
8999 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9001 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9002 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9003 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9004 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9005 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9006 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9012 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9013 * @adap: the adapter
9014 * @cid: the context id
9015 * @ctype: the context type
9016 * @data: where to store the context data
9018 * Reads an SGE context directly, bypassing FW. This is only for
9019 * debugging when FW is unavailable.
9021 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9026 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9027 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9029 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9030 *data++ = t4_read_reg(adap, i);
9034 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9037 struct fw_sched_cmd cmd;
9039 memset(&cmd, 0, sizeof(cmd));
9040 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9043 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9045 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9046 cmd.u.config.type = type;
9047 cmd.u.config.minmaxen = minmaxen;
9049 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9053 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9054 int rateunit, int ratemode, int channel, int cl,
9055 int minrate, int maxrate, int weight, int pktsize,
9058 struct fw_sched_cmd cmd;
9060 memset(&cmd, 0, sizeof(cmd));
9061 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9064 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9066 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9067 cmd.u.params.type = type;
9068 cmd.u.params.level = level;
9069 cmd.u.params.mode = mode;
9070 cmd.u.params.ch = channel;
9071 cmd.u.params.cl = cl;
9072 cmd.u.params.unit = rateunit;
9073 cmd.u.params.rate = ratemode;
9074 cmd.u.params.min = cpu_to_be32(minrate);
9075 cmd.u.params.max = cpu_to_be32(maxrate);
9076 cmd.u.params.weight = cpu_to_be16(weight);
9077 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9079 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9084 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9085 * @adapter: the adapter
9086 * @mbox: mailbox to use for the FW command
9087 * @pf: the PF owning the queue
9088 * @vf: the VF owning the queue
9089 * @timeout: watchdog timeout in ms
9090 * @action: watchdog timer / action
9092 * There are separate watchdog timers for each possible watchdog
9093 * action. Configure one of the watchdog timers by setting a non-zero
9094 * timeout. Disable a watchdog timer by using a timeout of zero.
9096 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9097 unsigned int pf, unsigned int vf,
9098 unsigned int timeout, unsigned int action)
9100 struct fw_watchdog_cmd wdog;
9104 * The watchdog command expects a timeout in units of 10ms so we need
9105 * to convert it here (via rounding) and force a minimum of one 10ms
9106 * "tick" if the timeout is non-zero but the convertion results in 0
9109 ticks = (timeout + 5)/10;
9110 if (timeout && !ticks)
9113 memset(&wdog, 0, sizeof wdog);
9114 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9117 V_FW_PARAMS_CMD_PFN(pf) |
9118 V_FW_PARAMS_CMD_VFN(vf));
9119 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9120 wdog.timeout = cpu_to_be32(ticks);
9121 wdog.action = cpu_to_be32(action);
9123 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9126 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9128 struct fw_devlog_cmd devlog_cmd;
9131 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9132 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9133 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9134 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9135 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9136 sizeof(devlog_cmd), &devlog_cmd);
9140 *level = devlog_cmd.level;
9144 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9146 struct fw_devlog_cmd devlog_cmd;
9148 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9149 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9152 devlog_cmd.level = level;
9153 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9154 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9155 sizeof(devlog_cmd), &devlog_cmd);