2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
37 #include "t4_regs_values.h"
38 #include "firmware/t4fw_interface.h"
41 #define msleep(x) do { \
45 pause("t4hw", (x) * hz / 1000); \
49 * t4_wait_op_done_val - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 * @valp: where to store the value of the register at completion time
58 * Wait until an operation is completed by checking a bit in a register
59 * up to @attempts times. If @valp is not NULL the value of the register
60 * at the time it indicated completion is stored there. Returns 0 if the
61 * operation completes and -EAGAIN otherwise.
63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64 int polarity, int attempts, int delay, u32 *valp)
67 u32 val = t4_read_reg(adapter, reg);
69 if (!!(val & mask) == polarity) {
81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
82 int polarity, int attempts, int delay)
84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
89 * t4_set_reg_field - set a register field to a value
90 * @adapter: the adapter to program
91 * @addr: the register address
92 * @mask: specifies the portion of the register to modify
93 * @val: the new value for the register field
95 * Sets a register field specified by the supplied mask to the
98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
101 u32 v = t4_read_reg(adapter, addr) & ~mask;
103 t4_write_reg(adapter, addr, v | val);
104 (void) t4_read_reg(adapter, addr); /* flush */
108 * t4_read_indirect - read indirectly addressed registers
110 * @addr_reg: register holding the indirect address
111 * @data_reg: register holding the value of the indirect register
112 * @vals: where the read register values are stored
113 * @nregs: how many indirect registers to read
114 * @start_idx: index of first indirect register to read
116 * Reads registers that are accessed indirectly through an address/data
119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 unsigned int data_reg, u32 *vals,
121 unsigned int nregs, unsigned int start_idx)
124 t4_write_reg(adap, addr_reg, start_idx);
125 *vals++ = t4_read_reg(adap, data_reg);
131 * t4_write_indirect - write indirectly addressed registers
133 * @addr_reg: register holding the indirect addresses
134 * @data_reg: register holding the value for the indirect registers
135 * @vals: values to write
136 * @nregs: how many indirect registers to write
137 * @start_idx: address of first indirect register to write
139 * Writes a sequential block of registers that are accessed indirectly
140 * through an address/data register pair.
142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
143 unsigned int data_reg, const u32 *vals,
144 unsigned int nregs, unsigned int start_idx)
147 t4_write_reg(adap, addr_reg, start_idx++);
148 t4_write_reg(adap, data_reg, *vals++);
153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
154 * mechanism. This guarantees that we get the real value even if we're
155 * operating within a Virtual Machine and the Hypervisor is trapping our
156 * Configuration Space accesses.
158 * N.B. This routine should only be used as a last resort: the firmware uses
159 * the backdoor registers on a regular basis and we can end up
160 * conflicting with it's uses!
162 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
167 if (chip_id(adap) <= CHELSIO_T5)
175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
180 * Configuration Space read. (None of the other fields matter when
181 * F_ENABLE is 0 so a simple register write is easier than a
182 * read-modify-write via t4_set_reg_field().)
184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
190 * t4_report_fw_error - report firmware error
193 * The adapter firmware can indicate error conditions to the host.
194 * If the firmware has indicated an error, print out the reason for
195 * the firmware error.
197 static void t4_report_fw_error(struct adapter *adap)
199 static const char *const reason[] = {
200 "Crash", /* PCIE_FW_EVAL_CRASH */
201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
207 "Reserved", /* reserved */
211 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
212 if (pcie_fw & F_PCIE_FW_ERR)
213 CH_ERR(adap, "Firmware reports adapter error: %s\n",
214 reason[G_PCIE_FW_EVAL(pcie_fw)]);
218 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
220 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
223 for ( ; nflit; nflit--, mbox_addr += 8)
224 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
228 * Handle a FW assertion reported in a mailbox.
230 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
233 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
234 asrt->u.assert.filename_0_7,
235 be32_to_cpu(asrt->u.assert.line),
236 be32_to_cpu(asrt->u.assert.x),
237 be32_to_cpu(asrt->u.assert.y));
240 #define X_CIM_PF_NOACCESS 0xeeeeeeee
242 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
244 * @mbox: index of the mailbox to use
245 * @cmd: the command to write
246 * @size: command length in bytes
247 * @rpl: where to optionally store the reply
248 * @sleep_ok: if true we may sleep while awaiting command completion
249 * @timeout: time to wait for command to finish before timing out
250 * (negative implies @sleep_ok=false)
252 * Sends the given command to FW through the selected mailbox and waits
253 * for the FW to execute the command. If @rpl is not %NULL it is used to
254 * store the FW's reply to the command. The command and its optional
255 * reply are of the same length. Some FW commands like RESET and
256 * INITIALIZE can take a considerable amount of time to execute.
257 * @sleep_ok determines whether we may sleep while awaiting the response.
258 * If sleeping is allowed we use progressive backoff otherwise we spin.
259 * Note that passing in a negative @timeout is an alternate mechanism
260 * for specifying @sleep_ok=false. This is useful when a higher level
261 * interface allows for specification of @timeout but not @sleep_ok ...
263 * The return value is 0 on success or a negative errno on failure. A
264 * failure can happen either because we are not able to execute the
265 * command or FW executes it but signals an error. In the latter case
266 * the return value is the error code indicated by FW (negated).
268 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
269 int size, void *rpl, bool sleep_ok, int timeout)
272 * We delay in small increments at first in an effort to maintain
273 * responsiveness for simple, fast executing commands but then back
274 * off to larger delays to a maximum retry delay.
276 static const int delay[] = {
277 1, 1, 3, 5, 10, 10, 20, 50, 100
281 int i, ms, delay_idx, ret;
282 const __be64 *p = cmd;
283 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
284 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
286 __be64 cmd_rpl[MBOX_LEN/8];
289 if ((size & 15) || size > MBOX_LEN)
293 * If we have a negative timeout, that implies that we can't sleep.
301 * Attempt to gain access to the mailbox.
303 for (i = 0; i < 4; i++) {
304 ctl = t4_read_reg(adap, ctl_reg);
306 if (v != X_MBOWNER_NONE)
311 * If we were unable to gain access, dequeue ourselves from the
312 * mailbox atomic access list and report the error to our caller.
314 if (v != X_MBOWNER_PL) {
315 t4_report_fw_error(adap);
316 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
321 * If we gain ownership of the mailbox and there's a "valid" message
322 * in it, this is likely an asynchronous error message from the
323 * firmware. So we'll report that and then proceed on with attempting
324 * to issue our own command ... which may well fail if the error
325 * presaged the firmware crashing ...
327 if (ctl & F_MBMSGVALID) {
328 CH_ERR(adap, "found VALID command in mbox %u: "
329 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
330 (unsigned long long)t4_read_reg64(adap, data_reg),
331 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
332 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
333 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
334 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
335 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
341 * Copy in the new mailbox command and send it on its way ...
343 for (i = 0; i < size; i += 8, p++)
344 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
346 CH_DUMP_MBOX(adap, mbox, data_reg);
348 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
349 t4_read_reg(adap, ctl_reg); /* flush write */
355 * Loop waiting for the reply; bail out if we time out or the firmware
359 !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
363 ms = delay[delay_idx]; /* last element may repeat */
364 if (delay_idx < ARRAY_SIZE(delay) - 1)
371 v = t4_read_reg(adap, ctl_reg);
372 if (v == X_CIM_PF_NOACCESS)
374 if (G_MBOWNER(v) == X_MBOWNER_PL) {
375 if (!(v & F_MBMSGVALID)) {
376 t4_write_reg(adap, ctl_reg,
377 V_MBOWNER(X_MBOWNER_NONE));
382 * Retrieve the command reply and release the mailbox.
384 get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
385 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
387 CH_DUMP_MBOX(adap, mbox, data_reg);
389 res = be64_to_cpu(cmd_rpl[0]);
390 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
391 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
392 res = V_FW_CMD_RETVAL(EIO);
394 memcpy(rpl, cmd_rpl, size);
395 return -G_FW_CMD_RETVAL((int)res);
400 * We timed out waiting for a reply to our mailbox command. Report
401 * the error and also check to see if the firmware reported any
404 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
405 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
406 *(const u8 *)cmd, mbox);
408 t4_report_fw_error(adap);
413 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
414 void *rpl, bool sleep_ok)
416 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
417 sleep_ok, FW_CMD_MAX_TIMEOUT);
421 static int t4_edc_err_read(struct adapter *adap, int idx)
423 u32 edc_ecc_err_addr_reg;
424 u32 edc_bist_status_rdata_reg;
427 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
430 if (idx != 0 && idx != 1) {
431 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
435 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
436 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
439 "edc%d err addr 0x%x: 0x%x.\n",
440 idx, edc_ecc_err_addr_reg,
441 t4_read_reg(adap, edc_ecc_err_addr_reg));
443 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
444 edc_bist_status_rdata_reg,
445 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
446 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
447 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
448 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
449 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
450 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
451 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
452 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
453 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
459 * t4_mc_read - read from MC through backdoor accesses
461 * @idx: which MC to access
462 * @addr: address of first byte requested
463 * @data: 64 bytes of data containing the requested address
464 * @ecc: where to store the corresponding 64-bit ECC word
466 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
467 * that covers the requested address @addr. If @parity is not %NULL it
468 * is assigned the 64-bit ECC word for the read data.
470 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
473 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
474 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
477 mc_bist_cmd_reg = A_MC_BIST_CMD;
478 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
479 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
480 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
481 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
483 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
484 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
485 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
486 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
488 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
492 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
494 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
495 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
496 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
497 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
498 F_START_BIST | V_BIST_CMD_GAP(1));
499 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
503 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
505 for (i = 15; i >= 0; i--)
506 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
508 *ecc = t4_read_reg64(adap, MC_DATA(16));
514 * t4_edc_read - read from EDC through backdoor accesses
516 * @idx: which EDC to access
517 * @addr: address of first byte requested
518 * @data: 64 bytes of data containing the requested address
519 * @ecc: where to store the corresponding 64-bit ECC word
521 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
522 * that covers the requested address @addr. If @parity is not %NULL it
523 * is assigned the 64-bit ECC word for the read data.
525 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
528 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
529 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
532 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
533 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
534 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
535 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
537 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
541 * These macro are missing in t4_regs.h file.
542 * Added temporarily for testing.
544 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
545 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
546 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
547 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
548 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
549 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
551 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
557 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
559 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
560 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
561 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
562 t4_write_reg(adap, edc_bist_cmd_reg,
563 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
564 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
568 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
570 for (i = 15; i >= 0; i--)
571 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
573 *ecc = t4_read_reg64(adap, EDC_DATA(16));
579 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
581 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
582 * @addr: address within indicated memory type
583 * @len: amount of memory to read
584 * @buf: host memory buffer
586 * Reads an [almost] arbitrary memory region in the firmware: the
587 * firmware memory address, length and host buffer must be aligned on
588 * 32-bit boudaries. The memory is returned as a raw byte sequence from
589 * the firmware's memory. If this memory contains data structures which
590 * contain multi-byte integers, it's the callers responsibility to
591 * perform appropriate byte order conversions.
593 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
596 u32 pos, start, end, offset;
600 * Argument sanity checks ...
602 if ((addr & 0x3) || (len & 0x3))
606 * The underlaying EDC/MC read routines read 64 bytes at a time so we
607 * need to round down the start and round up the end. We'll start
608 * copying out of the first line at (addr - start) a word at a time.
610 start = addr & ~(64-1);
611 end = (addr + len + 64-1) & ~(64-1);
612 offset = (addr - start)/sizeof(__be32);
614 for (pos = start; pos < end; pos += 64, offset = 0) {
618 * Read the chip's memory block and bail if there's an error.
620 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
621 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
623 ret = t4_edc_read(adap, mtype, pos, data, NULL);
628 * Copy the data into the caller's memory buffer.
630 while (offset < 16 && len > 0) {
631 *buf++ = data[offset++];
632 len -= sizeof(__be32);
640 * Return the specified PCI-E Configuration Space register from our Physical
641 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
642 * since we prefer to let the firmware own all of these registers, but if that
643 * fails we go for it directly ourselves.
645 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
649 * If fw_attach != 0, construct and send the Firmware LDST Command to
650 * retrieve the specified PCI-E Configuration Space register.
652 if (drv_fw_attach != 0) {
653 struct fw_ldst_cmd ldst_cmd;
656 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
657 ldst_cmd.op_to_addrspace =
658 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
661 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
662 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
663 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
664 ldst_cmd.u.pcie.ctrl_to_fn =
665 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
666 ldst_cmd.u.pcie.r = reg;
669 * If the LDST Command succeeds, return the result, otherwise
670 * fall through to reading it directly ourselves ...
672 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
675 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
677 CH_WARN(adap, "Firmware failed to return "
678 "Configuration Space register %d, err = %d\n",
683 * Read the desired Configuration Space register via the PCI-E
684 * Backdoor mechanism.
686 return t4_hw_pci_read_cfg4(adap, reg);
690 * t4_get_regs_len - return the size of the chips register set
691 * @adapter: the adapter
693 * Returns the size of the chip's BAR0 register space.
695 unsigned int t4_get_regs_len(struct adapter *adapter)
697 unsigned int chip_version = chip_id(adapter);
699 switch (chip_version) {
701 return T4_REGMAP_SIZE;
705 return T5_REGMAP_SIZE;
709 "Unsupported chip version %d\n", chip_version);
714 * t4_get_regs - read chip registers into provided buffer
716 * @buf: register buffer
717 * @buf_size: size (in bytes) of register buffer
719 * If the provided register buffer isn't large enough for the chip's
720 * full register range, the register dump will be truncated to the
721 * register buffer's size.
723 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
725 static const unsigned int t4_reg_ranges[] = {
1183 static const unsigned int t5_reg_ranges[] = {
1958 static const unsigned int t6_reg_ranges[] = {
2535 u32 *buf_end = (u32 *)(buf + buf_size);
2536 const unsigned int *reg_ranges;
2537 int reg_ranges_size, range;
2538 unsigned int chip_version = chip_id(adap);
2541 * Select the right set of register ranges to dump depending on the
2542 * adapter chip type.
2544 switch (chip_version) {
2546 reg_ranges = t4_reg_ranges;
2547 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2551 reg_ranges = t5_reg_ranges;
2552 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2556 reg_ranges = t6_reg_ranges;
2557 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2562 "Unsupported chip version %d\n", chip_version);
2567 * Clear the register buffer and insert the appropriate register
2568 * values selected by the above register ranges.
2570 memset(buf, 0, buf_size);
2571 for (range = 0; range < reg_ranges_size; range += 2) {
2572 unsigned int reg = reg_ranges[range];
2573 unsigned int last_reg = reg_ranges[range + 1];
2574 u32 *bufp = (u32 *)(buf + reg);
2577 * Iterate across the register range filling in the register
2578 * buffer but don't write past the end of the register buffer.
2580 while (reg <= last_reg && bufp < buf_end) {
2581 *bufp++ = t4_read_reg(adap, reg);
2588 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2600 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2602 #define EEPROM_DELAY 10 /* 10us per poll spin */
2603 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2605 #define EEPROM_STAT_ADDR 0x7bfc
2606 #define VPD_BASE 0x400
2607 #define VPD_BASE_OLD 0
2608 #define VPD_LEN 1024
2609 #define VPD_INFO_FLD_HDR_SIZE 3
2610 #define CHELSIO_VPD_UNIQUE_ID 0x82
2613 * Small utility function to wait till any outstanding VPD Access is complete.
2614 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2615 * VPD Access in flight. This allows us to handle the problem of having a
2616 * previous VPD Access time out and prevent an attempt to inject a new VPD
2617 * Request before any in-flight VPD reguest has completed.
2619 static int t4_seeprom_wait(struct adapter *adapter)
2621 unsigned int base = adapter->params.pci.vpd_cap_addr;
2625 * If no VPD Access is in flight, we can just return success right
2628 if (!adapter->vpd_busy)
2632 * Poll the VPD Capability Address/Flag register waiting for it
2633 * to indicate that the operation is complete.
2635 max_poll = EEPROM_MAX_POLL;
2639 udelay(EEPROM_DELAY);
2640 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2643 * If the operation is complete, mark the VPD as no longer
2644 * busy and return success.
2646 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2647 adapter->vpd_busy = 0;
2650 } while (--max_poll);
2653 * Failure! Note that we leave the VPD Busy status set in order to
2654 * avoid pushing a new VPD Access request into the VPD Capability till
2655 * the current operation eventually succeeds. It's a bug to issue a
2656 * new request when an existing request is in flight and will result
2657 * in corrupt hardware state.
2663 * t4_seeprom_read - read a serial EEPROM location
2664 * @adapter: adapter to read
2665 * @addr: EEPROM virtual address
2666 * @data: where to store the read data
2668 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2669 * VPD capability. Note that this function must be called with a virtual
2672 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2674 unsigned int base = adapter->params.pci.vpd_cap_addr;
2678 * VPD Accesses must alway be 4-byte aligned!
2680 if (addr >= EEPROMVSIZE || (addr & 3))
2684 * Wait for any previous operation which may still be in flight to
2687 ret = t4_seeprom_wait(adapter);
2689 CH_ERR(adapter, "VPD still busy from previous operation\n");
2694 * Issue our new VPD Read request, mark the VPD as being busy and wait
2695 * for our request to complete. If it doesn't complete, note the
2696 * error and return it to our caller. Note that we do not reset the
2699 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2700 adapter->vpd_busy = 1;
2701 adapter->vpd_flag = PCI_VPD_ADDR_F;
2702 ret = t4_seeprom_wait(adapter);
2704 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2709 * Grab the returned data, swizzle it into our endianess and
2712 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2713 *data = le32_to_cpu(*data);
2718 * t4_seeprom_write - write a serial EEPROM location
2719 * @adapter: adapter to write
2720 * @addr: virtual EEPROM address
2721 * @data: value to write
2723 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2724 * VPD capability. Note that this function must be called with a virtual
2727 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2729 unsigned int base = adapter->params.pci.vpd_cap_addr;
2735 * VPD Accesses must alway be 4-byte aligned!
2737 if (addr >= EEPROMVSIZE || (addr & 3))
2741 * Wait for any previous operation which may still be in flight to
2744 ret = t4_seeprom_wait(adapter);
2746 CH_ERR(adapter, "VPD still busy from previous operation\n");
2751 * Issue our new VPD Read request, mark the VPD as being busy and wait
2752 * for our request to complete. If it doesn't complete, note the
2753 * error and return it to our caller. Note that we do not reset the
2756 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2758 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2759 (u16)addr | PCI_VPD_ADDR_F);
2760 adapter->vpd_busy = 1;
2761 adapter->vpd_flag = 0;
2762 ret = t4_seeprom_wait(adapter);
2764 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2769 * Reset PCI_VPD_DATA register after a transaction and wait for our
2770 * request to complete. If it doesn't complete, return error.
2772 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2773 max_poll = EEPROM_MAX_POLL;
2775 udelay(EEPROM_DELAY);
2776 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2777 } while ((stats_reg & 0x1) && --max_poll);
2781 /* Return success! */
2786 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2787 * @phys_addr: the physical EEPROM address
2788 * @fn: the PCI function number
2789 * @sz: size of function-specific area
2791 * Translate a physical EEPROM address to virtual. The first 1K is
2792 * accessed through virtual addresses starting at 31K, the rest is
2793 * accessed through virtual addresses starting at 0.
2795 * The mapping is as follows:
2796 * [0..1K) -> [31K..32K)
2797 * [1K..1K+A) -> [ES-A..ES)
2798 * [1K+A..ES) -> [0..ES-A-1K)
2800 * where A = @fn * @sz, and ES = EEPROM size.
2802 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2805 if (phys_addr < 1024)
2806 return phys_addr + (31 << 10);
2807 if (phys_addr < 1024 + fn)
2808 return EEPROMSIZE - fn + phys_addr - 1024;
2809 if (phys_addr < EEPROMSIZE)
2810 return phys_addr - 1024 - fn;
2815 * t4_seeprom_wp - enable/disable EEPROM write protection
2816 * @adapter: the adapter
2817 * @enable: whether to enable or disable write protection
2819 * Enables or disables write protection on the serial EEPROM.
2821 int t4_seeprom_wp(struct adapter *adapter, int enable)
2823 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2827 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2828 * @v: Pointer to buffered vpd data structure
2829 * @kw: The keyword to search for
2831 * Returns the value of the information field keyword or
2832 * -ENOENT otherwise.
2834 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2837 unsigned int offset , len;
2838 const u8 *buf = (const u8 *)v;
2839 const u8 *vpdr_len = &v->vpdr_len[0];
2840 offset = sizeof(struct t4_vpd_hdr);
2841 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2843 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2847 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2848 if(memcmp(buf + i , kw , 2) == 0){
2849 i += VPD_INFO_FLD_HDR_SIZE;
2853 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2861 * get_vpd_params - read VPD parameters from VPD EEPROM
2862 * @adapter: adapter to read
2863 * @p: where to store the parameters
2864 * @vpd: caller provided temporary space to read the VPD into
2866 * Reads card parameters stored in VPD EEPROM.
2868 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2874 const struct t4_vpd_hdr *v;
2877 * Card information normally starts at VPD_BASE but early cards had
2880 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2885 * The VPD shall have a unique identifier specified by the PCI SIG.
2886 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2887 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2888 * is expected to automatically put this entry at the
2889 * beginning of the VPD.
2891 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2893 for (i = 0; i < VPD_LEN; i += 4) {
2894 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2898 v = (const struct t4_vpd_hdr *)vpd;
2900 #define FIND_VPD_KW(var,name) do { \
2901 var = get_vpd_keyword_val(v , name); \
2903 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2908 FIND_VPD_KW(i, "RV");
2909 for (csum = 0; i >= 0; i--)
2914 "corrupted VPD EEPROM, actual csum %u\n", csum);
2918 FIND_VPD_KW(ec, "EC");
2919 FIND_VPD_KW(sn, "SN");
2920 FIND_VPD_KW(pn, "PN");
2921 FIND_VPD_KW(na, "NA");
2924 memcpy(p->id, v->id_data, ID_LEN);
2926 memcpy(p->ec, vpd + ec, EC_LEN);
2928 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
2929 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2931 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
2932 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2933 strstrip((char *)p->pn);
2934 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
2935 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2936 strstrip((char *)p->na);
2941 /* serial flash and firmware constants and flash config file constants */
2943 SF_ATTEMPTS = 10, /* max retries for SF operations */
2945 /* flash command opcodes */
2946 SF_PROG_PAGE = 2, /* program page */
2947 SF_WR_DISABLE = 4, /* disable writes */
2948 SF_RD_STATUS = 5, /* read status register */
2949 SF_WR_ENABLE = 6, /* enable writes */
2950 SF_RD_DATA_FAST = 0xb, /* read flash */
2951 SF_RD_ID = 0x9f, /* read ID */
2952 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2956 * sf1_read - read data from the serial flash
2957 * @adapter: the adapter
2958 * @byte_cnt: number of bytes to read
2959 * @cont: whether another operation will be chained
2960 * @lock: whether to lock SF for PL access only
2961 * @valp: where to store the read data
2963 * Reads up to 4 bytes of data from the serial flash. The location of
2964 * the read needs to be specified prior to calling this by issuing the
2965 * appropriate commands to the serial flash.
2967 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2968 int lock, u32 *valp)
2972 if (!byte_cnt || byte_cnt > 4)
2974 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2976 t4_write_reg(adapter, A_SF_OP,
2977 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2978 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2980 *valp = t4_read_reg(adapter, A_SF_DATA);
2985 * sf1_write - write data to the serial flash
2986 * @adapter: the adapter
2987 * @byte_cnt: number of bytes to write
2988 * @cont: whether another operation will be chained
2989 * @lock: whether to lock SF for PL access only
2990 * @val: value to write
2992 * Writes up to 4 bytes of data to the serial flash. The location of
2993 * the write needs to be specified prior to calling this by issuing the
2994 * appropriate commands to the serial flash.
2996 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2999 if (!byte_cnt || byte_cnt > 4)
3001 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3003 t4_write_reg(adapter, A_SF_DATA, val);
3004 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3005 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3006 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3010 * flash_wait_op - wait for a flash operation to complete
3011 * @adapter: the adapter
3012 * @attempts: max number of polls of the status register
3013 * @delay: delay between polls in ms
3015 * Wait for a flash operation to complete by polling the status register.
3017 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3023 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3024 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3028 if (--attempts == 0)
3036 * t4_read_flash - read words from serial flash
3037 * @adapter: the adapter
3038 * @addr: the start address for the read
3039 * @nwords: how many 32-bit words to read
3040 * @data: where to store the read data
3041 * @byte_oriented: whether to store data as bytes or as words
3043 * Read the specified number of 32-bit words from the serial flash.
3044 * If @byte_oriented is set the read data is stored as a byte array
3045 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3046 * natural endianness.
3048 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3049 unsigned int nwords, u32 *data, int byte_oriented)
3053 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3056 addr = swab32(addr) | SF_RD_DATA_FAST;
3058 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3059 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3062 for ( ; nwords; nwords--, data++) {
3063 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3065 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3069 *data = (__force __u32)(cpu_to_be32(*data));
3075 * t4_write_flash - write up to a page of data to the serial flash
3076 * @adapter: the adapter
3077 * @addr: the start address to write
3078 * @n: length of data to write in bytes
3079 * @data: the data to write
3080 * @byte_oriented: whether to store data as bytes or as words
3082 * Writes up to a page of data (256 bytes) to the serial flash starting
3083 * at the given address. All the data must be written to the same page.
3084 * If @byte_oriented is set the write data is stored as byte stream
3085 * (i.e. matches what on disk), otherwise in big-endian.
3087 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3088 unsigned int n, const u8 *data, int byte_oriented)
3091 u32 buf[SF_PAGE_SIZE / 4];
3092 unsigned int i, c, left, val, offset = addr & 0xff;
3094 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3097 val = swab32(addr) | SF_PROG_PAGE;
3099 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3100 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3103 for (left = n; left; left -= c) {
3105 for (val = 0, i = 0; i < c; ++i)
3106 val = (val << 8) + *data++;
3109 val = cpu_to_be32(val);
3111 ret = sf1_write(adapter, c, c != left, 1, val);
3115 ret = flash_wait_op(adapter, 8, 1);
3119 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3121 /* Read the page to verify the write succeeded */
3122 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3127 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3129 "failed to correctly write the flash page at %#x\n",
3136 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3141 * t4_get_fw_version - read the firmware version
3142 * @adapter: the adapter
3143 * @vers: where to place the version
3145 * Reads the FW version from flash.
3147 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3149 return t4_read_flash(adapter, FLASH_FW_START +
3150 offsetof(struct fw_hdr, fw_ver), 1,
3155 * t4_get_tp_version - read the TP microcode version
3156 * @adapter: the adapter
3157 * @vers: where to place the version
3159 * Reads the TP microcode version from flash.
3161 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3163 return t4_read_flash(adapter, FLASH_FW_START +
3164 offsetof(struct fw_hdr, tp_microcode_ver),
3169 * t4_get_exprom_version - return the Expansion ROM version (if any)
3170 * @adapter: the adapter
3171 * @vers: where to place the version
3173 * Reads the Expansion ROM header from FLASH and returns the version
3174 * number (if present) through the @vers return value pointer. We return
3175 * this in the Firmware Version Format since it's convenient. Return
3176 * 0 on success, -ENOENT if no Expansion ROM is present.
3178 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3180 struct exprom_header {
3181 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3182 unsigned char hdr_ver[4]; /* Expansion ROM version */
3184 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3188 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3189 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3194 hdr = (struct exprom_header *)exprom_header_buf;
3195 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3198 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3199 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3200 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3201 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3206 * t4_flash_erase_sectors - erase a range of flash sectors
3207 * @adapter: the adapter
3208 * @start: the first sector to erase
3209 * @end: the last sector to erase
3211 * Erases the sectors in the given inclusive range.
3213 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3217 if (end >= adapter->params.sf_nsec)
3220 while (start <= end) {
3221 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3222 (ret = sf1_write(adapter, 4, 0, 1,
3223 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3224 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3226 "erase of flash sector %d failed, error %d\n",
3232 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3237 * t4_flash_cfg_addr - return the address of the flash configuration file
3238 * @adapter: the adapter
3240 * Return the address within the flash where the Firmware Configuration
3241 * File is stored, or an error if the device FLASH is too small to contain
3242 * a Firmware Configuration File.
3244 int t4_flash_cfg_addr(struct adapter *adapter)
3247 * If the device FLASH isn't large enough to hold a Firmware
3248 * Configuration File, return an error.
3250 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3253 return FLASH_CFG_START;
3257 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3258 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3259 * and emit an error message for mismatched firmware to save our caller the
3262 static int t4_fw_matches_chip(struct adapter *adap,
3263 const struct fw_hdr *hdr)
3266 * The expression below will return FALSE for any unsupported adapter
3267 * which will keep us "honest" in the future ...
3269 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3270 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3271 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3275 "FW image (%d) is not suitable for this adapter (%d)\n",
3276 hdr->chip, chip_id(adap));
3281 * t4_load_fw - download firmware
3282 * @adap: the adapter
3283 * @fw_data: the firmware image to write
3286 * Write the supplied firmware image to the card's serial flash.
3288 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3293 u8 first_page[SF_PAGE_SIZE];
3294 const u32 *p = (const u32 *)fw_data;
3295 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3296 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3297 unsigned int fw_start_sec;
3298 unsigned int fw_start;
3299 unsigned int fw_size;
3301 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3302 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3303 fw_start = FLASH_FWBOOTSTRAP_START;
3304 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3306 fw_start_sec = FLASH_FW_START_SEC;
3307 fw_start = FLASH_FW_START;
3308 fw_size = FLASH_FW_MAX_SIZE;
3312 CH_ERR(adap, "FW image has no data\n");
3317 "FW image size not multiple of 512 bytes\n");
3320 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3322 "FW image size differs from size in FW header\n");
3325 if (size > fw_size) {
3326 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3330 if (!t4_fw_matches_chip(adap, hdr))
3333 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3334 csum += be32_to_cpu(p[i]);
3336 if (csum != 0xffffffff) {
3338 "corrupted firmware image, checksum %#x\n", csum);
3342 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3343 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3348 * We write the correct version at the end so the driver can see a bad
3349 * version if the FW write fails. Start by writing a copy of the
3350 * first page with a bad version.
3352 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3353 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3354 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3359 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3360 addr += SF_PAGE_SIZE;
3361 fw_data += SF_PAGE_SIZE;
3362 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3367 ret = t4_write_flash(adap,
3368 fw_start + offsetof(struct fw_hdr, fw_ver),
3369 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3372 CH_ERR(adap, "firmware download failed, error %d\n",
3378 * t4_fwcache - firmware cache operation
3379 * @adap: the adapter
3380 * @op : the operation (flush or flush and invalidate)
3382 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3384 struct fw_params_cmd c;
3386 memset(&c, 0, sizeof(c));
3388 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3389 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3390 V_FW_PARAMS_CMD_PFN(adap->pf) |
3391 V_FW_PARAMS_CMD_VFN(0));
3392 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3394 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3395 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3396 c.param[0].val = (__force __be32)op;
3398 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3401 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3402 unsigned int *pif_req_wrptr,
3403 unsigned int *pif_rsp_wrptr)
3406 u32 cfg, val, req, rsp;
3408 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3409 if (cfg & F_LADBGEN)
3410 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3412 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3413 req = G_POLADBGWRPTR(val);
3414 rsp = G_PILADBGWRPTR(val);
3416 *pif_req_wrptr = req;
3418 *pif_rsp_wrptr = rsp;
3420 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3421 for (j = 0; j < 6; j++) {
3422 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3423 V_PILADBGRDPTR(rsp));
3424 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3425 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3429 req = (req + 2) & M_POLADBGRDPTR;
3430 rsp = (rsp + 2) & M_PILADBGRDPTR;
3432 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3435 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3440 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3441 if (cfg & F_LADBGEN)
3442 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3444 for (i = 0; i < CIM_MALA_SIZE; i++) {
3445 for (j = 0; j < 5; j++) {
3447 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3448 V_PILADBGRDPTR(idx));
3449 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3450 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3453 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3456 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3460 for (i = 0; i < 8; i++) {
3461 u32 *p = la_buf + i;
3463 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3464 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3465 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3466 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3467 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3471 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3472 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
3473 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
3476 * t4_link_l1cfg - apply link configuration to MAC/PHY
3477 * @phy: the PHY to setup
3478 * @mac: the MAC to setup
3479 * @lc: the requested link configuration
3481 * Set up a port's MAC and PHY according to a desired link configuration.
3482 * - If the PHY can auto-negotiate first decide what to advertise, then
3483 * enable/disable auto-negotiation as desired, and reset.
3484 * - If the PHY does not auto-negotiate just reset it.
3485 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3486 * otherwise do it later based on the outcome of auto-negotiation.
3488 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3489 struct link_config *lc)
3491 struct fw_port_cmd c;
3492 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3495 if (lc->requested_fc & PAUSE_RX)
3496 fc |= FW_PORT_CAP_FC_RX;
3497 if (lc->requested_fc & PAUSE_TX)
3498 fc |= FW_PORT_CAP_FC_TX;
3500 memset(&c, 0, sizeof(c));
3501 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3502 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3503 V_FW_PORT_CMD_PORTID(port));
3505 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3508 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3509 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3511 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3512 } else if (lc->autoneg == AUTONEG_DISABLE) {
3513 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
3514 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3516 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
3518 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3522 * t4_restart_aneg - restart autonegotiation
3523 * @adap: the adapter
3524 * @mbox: mbox to use for the FW command
3525 * @port: the port id
3527 * Restarts autonegotiation for the selected port.
3529 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3531 struct fw_port_cmd c;
3533 memset(&c, 0, sizeof(c));
3534 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3535 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3536 V_FW_PORT_CMD_PORTID(port));
3538 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3540 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3541 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3544 typedef void (*int_handler_t)(struct adapter *adap);
3547 unsigned int mask; /* bits to check in interrupt status */
3548 const char *msg; /* message to print or NULL */
3549 short stat_idx; /* stat counter to increment or -1 */
3550 unsigned short fatal; /* whether the condition reported is fatal */
3551 int_handler_t int_handler; /* platform-specific int handler */
3555 * t4_handle_intr_status - table driven interrupt handler
3556 * @adapter: the adapter that generated the interrupt
3557 * @reg: the interrupt status register to process
3558 * @acts: table of interrupt actions
3560 * A table driven interrupt handler that applies a set of masks to an
3561 * interrupt status word and performs the corresponding actions if the
3562 * interrupts described by the mask have occurred. The actions include
3563 * optionally emitting a warning or alert message. The table is terminated
3564 * by an entry specifying mask 0. Returns the number of fatal interrupt
3567 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3568 const struct intr_info *acts)
3571 unsigned int mask = 0;
3572 unsigned int status = t4_read_reg(adapter, reg);
3574 for ( ; acts->mask; ++acts) {
3575 if (!(status & acts->mask))
3579 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3580 status & acts->mask);
3581 } else if (acts->msg)
3582 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3583 status & acts->mask);
3584 if (acts->int_handler)
3585 acts->int_handler(adapter);
3589 if (status) /* clear processed interrupts */
3590 t4_write_reg(adapter, reg, status);
3595 * Interrupt handler for the PCIE module.
3597 static void pcie_intr_handler(struct adapter *adapter)
3599 static const struct intr_info sysbus_intr_info[] = {
3600 { F_RNPP, "RXNP array parity error", -1, 1 },
3601 { F_RPCP, "RXPC array parity error", -1, 1 },
3602 { F_RCIP, "RXCIF array parity error", -1, 1 },
3603 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3604 { F_RFTP, "RXFT array parity error", -1, 1 },
3607 static const struct intr_info pcie_port_intr_info[] = {
3608 { F_TPCP, "TXPC array parity error", -1, 1 },
3609 { F_TNPP, "TXNP array parity error", -1, 1 },
3610 { F_TFTP, "TXFT array parity error", -1, 1 },
3611 { F_TCAP, "TXCA array parity error", -1, 1 },
3612 { F_TCIP, "TXCIF array parity error", -1, 1 },
3613 { F_RCAP, "RXCA array parity error", -1, 1 },
3614 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3615 { F_RDPE, "Rx data parity error", -1, 1 },
3616 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
3619 static const struct intr_info pcie_intr_info[] = {
3620 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3621 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3622 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3623 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3624 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3625 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3626 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3627 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3628 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3629 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3630 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3631 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3632 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3633 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3634 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3635 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3636 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3637 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3638 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3639 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3640 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3641 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3642 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3643 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3644 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3645 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3646 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3647 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3648 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3649 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3654 static const struct intr_info t5_pcie_intr_info[] = {
3655 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3657 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3658 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3659 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3660 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3661 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3662 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3663 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3665 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3667 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3668 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3669 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3670 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3671 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3673 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3674 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3675 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3676 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3677 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3678 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3679 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3680 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3681 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3682 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3683 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3685 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3687 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3688 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3689 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3690 { F_READRSPERR, "Outbound read error", -1,
3698 fat = t4_handle_intr_status(adapter,
3699 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3701 t4_handle_intr_status(adapter,
3702 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3703 pcie_port_intr_info) +
3704 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3707 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3710 t4_fatal_err(adapter);
3714 * TP interrupt handler.
3716 static void tp_intr_handler(struct adapter *adapter)
3718 static const struct intr_info tp_intr_info[] = {
3719 { 0x3fffffff, "TP parity error", -1, 1 },
3720 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3724 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3725 t4_fatal_err(adapter);
3729 * SGE interrupt handler.
3731 static void sge_intr_handler(struct adapter *adapter)
3736 static const struct intr_info sge_intr_info[] = {
3737 { F_ERR_CPL_EXCEED_IQE_SIZE,
3738 "SGE received CPL exceeding IQE size", -1, 1 },
3739 { F_ERR_INVALID_CIDX_INC,
3740 "SGE GTS CIDX increment too large", -1, 0 },
3741 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3742 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3743 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3744 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3745 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3747 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3749 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3751 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3753 { F_ERR_ING_CTXT_PRIO,
3754 "SGE too many priority ingress contexts", -1, 0 },
3755 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3756 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3760 static const struct intr_info t4t5_sge_intr_info[] = {
3761 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3762 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3763 { F_ERR_EGR_CTXT_PRIO,
3764 "SGE too many priority egress contexts", -1, 0 },
3769 * For now, treat below interrupts as fatal so that we disable SGE and
3770 * get better debug */
3771 static const struct intr_info t6_sge_intr_info[] = {
3772 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
3773 "SGE PCIe error for a DBP thread", -1, 1 },
3775 "SGE Actual WRE packet is less than advertized length",
3780 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
3781 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
3783 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
3784 (unsigned long long)v);
3785 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
3786 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
3789 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
3790 if (chip_id(adapter) <= CHELSIO_T5)
3791 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3792 t4t5_sge_intr_info);
3794 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3797 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
3798 if (err & F_ERROR_QID_VALID) {
3799 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
3800 if (err & F_UNCAPTURED_ERROR)
3801 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
3802 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
3803 F_UNCAPTURED_ERROR);
3807 t4_fatal_err(adapter);
3810 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
3811 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
3812 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
3813 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
3816 * CIM interrupt handler.
3818 static void cim_intr_handler(struct adapter *adapter)
3820 static const struct intr_info cim_intr_info[] = {
3821 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
3822 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3823 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3824 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
3825 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
3826 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
3827 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
3830 static const struct intr_info cim_upintr_info[] = {
3831 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
3832 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
3833 { F_ILLWRINT, "CIM illegal write", -1, 1 },
3834 { F_ILLRDINT, "CIM illegal read", -1, 1 },
3835 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
3836 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
3837 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
3838 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
3839 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
3840 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
3841 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
3842 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
3843 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
3844 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
3845 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
3846 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
3847 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
3848 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
3849 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
3850 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
3851 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
3852 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
3853 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
3854 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
3855 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
3856 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
3857 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
3858 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
3863 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
3864 t4_report_fw_error(adapter);
3866 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
3868 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
3871 t4_fatal_err(adapter);
3875 * ULP RX interrupt handler.
3877 static void ulprx_intr_handler(struct adapter *adapter)
3879 static const struct intr_info ulprx_intr_info[] = {
3880 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
3881 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
3882 { 0x7fffff, "ULPRX parity error", -1, 1 },
3886 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
3887 t4_fatal_err(adapter);
3891 * ULP TX interrupt handler.
3893 static void ulptx_intr_handler(struct adapter *adapter)
3895 static const struct intr_info ulptx_intr_info[] = {
3896 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
3898 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
3900 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
3902 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
3904 { 0xfffffff, "ULPTX parity error", -1, 1 },
3908 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
3909 t4_fatal_err(adapter);
3913 * PM TX interrupt handler.
3915 static void pmtx_intr_handler(struct adapter *adapter)
3917 static const struct intr_info pmtx_intr_info[] = {
3918 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
3919 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
3920 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
3921 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
3922 { 0xffffff0, "PMTX framing error", -1, 1 },
3923 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
3924 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
3926 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
3927 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
3931 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
3932 t4_fatal_err(adapter);
3936 * PM RX interrupt handler.
3938 static void pmrx_intr_handler(struct adapter *adapter)
3940 static const struct intr_info pmrx_intr_info[] = {
3941 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
3942 { 0x3ffff0, "PMRX framing error", -1, 1 },
3943 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
3944 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
3946 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
3947 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
3951 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
3952 t4_fatal_err(adapter);
3956 * CPL switch interrupt handler.
3958 static void cplsw_intr_handler(struct adapter *adapter)
3960 static const struct intr_info cplsw_intr_info[] = {
3961 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
3962 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
3963 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
3964 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
3965 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
3966 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
3970 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
3971 t4_fatal_err(adapter);
3975 * LE interrupt handler.
3977 static void le_intr_handler(struct adapter *adap)
3979 unsigned int chip_ver = chip_id(adap);
3980 static const struct intr_info le_intr_info[] = {
3981 { F_LIPMISS, "LE LIP miss", -1, 0 },
3982 { F_LIP0, "LE 0 LIP error", -1, 0 },
3983 { F_PARITYERR, "LE parity error", -1, 1 },
3984 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
3985 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
3989 static const struct intr_info t6_le_intr_info[] = {
3990 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
3991 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
3992 { F_TCAMINTPERR, "LE parity error", -1, 1 },
3993 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
3994 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
3998 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
3999 (chip_ver <= CHELSIO_T5) ?
4000 le_intr_info : t6_le_intr_info))
4005 * MPS interrupt handler.
4007 static void mps_intr_handler(struct adapter *adapter)
4009 static const struct intr_info mps_rx_intr_info[] = {
4010 { 0xffffff, "MPS Rx parity error", -1, 1 },
4013 static const struct intr_info mps_tx_intr_info[] = {
4014 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4015 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4016 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4018 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4020 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4021 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4022 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4025 static const struct intr_info mps_trc_intr_info[] = {
4026 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4027 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4029 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4032 static const struct intr_info mps_stat_sram_intr_info[] = {
4033 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4036 static const struct intr_info mps_stat_tx_intr_info[] = {
4037 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4040 static const struct intr_info mps_stat_rx_intr_info[] = {
4041 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4044 static const struct intr_info mps_cls_intr_info[] = {
4045 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4046 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4047 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4053 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4055 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4057 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4058 mps_trc_intr_info) +
4059 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4060 mps_stat_sram_intr_info) +
4061 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4062 mps_stat_tx_intr_info) +
4063 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4064 mps_stat_rx_intr_info) +
4065 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4068 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4069 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4071 t4_fatal_err(adapter);
4074 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4078 * EDC/MC interrupt handler.
4080 static void mem_intr_handler(struct adapter *adapter, int idx)
4082 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4084 unsigned int addr, cnt_addr, v;
4086 if (idx <= MEM_EDC1) {
4087 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4088 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4089 } else if (idx == MEM_MC) {
4090 if (is_t4(adapter)) {
4091 addr = A_MC_INT_CAUSE;
4092 cnt_addr = A_MC_ECC_STATUS;
4094 addr = A_MC_P_INT_CAUSE;
4095 cnt_addr = A_MC_P_ECC_STATUS;
4098 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4099 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4102 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4103 if (v & F_PERR_INT_CAUSE)
4104 CH_ALERT(adapter, "%s FIFO parity error\n",
4106 if (v & F_ECC_CE_INT_CAUSE) {
4107 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4109 t4_edc_err_read(adapter, idx);
4111 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4112 CH_WARN_RATELIMIT(adapter,
4113 "%u %s correctable ECC data error%s\n",
4114 cnt, name[idx], cnt > 1 ? "s" : "");
4116 if (v & F_ECC_UE_INT_CAUSE)
4118 "%s uncorrectable ECC data error\n", name[idx]);
4120 t4_write_reg(adapter, addr, v);
4121 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4122 t4_fatal_err(adapter);
4126 * MA interrupt handler.
4128 static void ma_intr_handler(struct adapter *adapter)
4130 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4132 if (status & F_MEM_PERR_INT_CAUSE) {
4134 "MA parity error, parity status %#x\n",
4135 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4138 "MA parity error, parity status %#x\n",
4139 t4_read_reg(adapter,
4140 A_MA_PARITY_ERROR_STATUS2));
4142 if (status & F_MEM_WRAP_INT_CAUSE) {
4143 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4144 CH_ALERT(adapter, "MA address wrap-around error by "
4145 "client %u to address %#x\n",
4146 G_MEM_WRAP_CLIENT_NUM(v),
4147 G_MEM_WRAP_ADDRESS(v) << 4);
4149 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4150 t4_fatal_err(adapter);
4154 * SMB interrupt handler.
4156 static void smb_intr_handler(struct adapter *adap)
4158 static const struct intr_info smb_intr_info[] = {
4159 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4160 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4161 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4165 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4170 * NC-SI interrupt handler.
4172 static void ncsi_intr_handler(struct adapter *adap)
4174 static const struct intr_info ncsi_intr_info[] = {
4175 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4176 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4177 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4178 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4182 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4187 * XGMAC interrupt handler.
4189 static void xgmac_intr_handler(struct adapter *adap, int port)
4191 u32 v, int_cause_reg;
4194 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4196 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4198 v = t4_read_reg(adap, int_cause_reg);
4200 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4204 if (v & F_TXFIFO_PRTY_ERR)
4205 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4207 if (v & F_RXFIFO_PRTY_ERR)
4208 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4210 t4_write_reg(adap, int_cause_reg, v);
4215 * PL interrupt handler.
4217 static void pl_intr_handler(struct adapter *adap)
4219 static const struct intr_info pl_intr_info[] = {
4220 { F_FATALPERR, "Fatal parity error", -1, 1 },
4221 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4225 static const struct intr_info t5_pl_intr_info[] = {
4226 { F_FATALPERR, "Fatal parity error", -1, 1 },
4230 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4232 pl_intr_info : t5_pl_intr_info))
4236 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4239 * t4_slow_intr_handler - control path interrupt handler
4240 * @adapter: the adapter
4242 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4243 * The designation 'slow' is because it involves register reads, while
4244 * data interrupts typically don't involve any MMIOs.
4246 int t4_slow_intr_handler(struct adapter *adapter)
4248 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4250 if (!(cause & GLBL_INTR_MASK))
4253 cim_intr_handler(adapter);
4255 mps_intr_handler(adapter);
4257 ncsi_intr_handler(adapter);
4259 pl_intr_handler(adapter);
4261 smb_intr_handler(adapter);
4263 xgmac_intr_handler(adapter, 0);
4265 xgmac_intr_handler(adapter, 1);
4267 xgmac_intr_handler(adapter, 2);
4269 xgmac_intr_handler(adapter, 3);
4271 pcie_intr_handler(adapter);
4273 mem_intr_handler(adapter, MEM_MC);
4274 if (is_t5(adapter) && (cause & F_MC1))
4275 mem_intr_handler(adapter, MEM_MC1);
4277 mem_intr_handler(adapter, MEM_EDC0);
4279 mem_intr_handler(adapter, MEM_EDC1);
4281 le_intr_handler(adapter);
4283 tp_intr_handler(adapter);
4285 ma_intr_handler(adapter);
4286 if (cause & F_PM_TX)
4287 pmtx_intr_handler(adapter);
4288 if (cause & F_PM_RX)
4289 pmrx_intr_handler(adapter);
4290 if (cause & F_ULP_RX)
4291 ulprx_intr_handler(adapter);
4292 if (cause & F_CPL_SWITCH)
4293 cplsw_intr_handler(adapter);
4295 sge_intr_handler(adapter);
4296 if (cause & F_ULP_TX)
4297 ulptx_intr_handler(adapter);
4299 /* Clear the interrupts just processed for which we are the master. */
4300 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4301 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4306 * t4_intr_enable - enable interrupts
4307 * @adapter: the adapter whose interrupts should be enabled
4309 * Enable PF-specific interrupts for the calling function and the top-level
4310 * interrupt concentrator for global interrupts. Interrupts are already
4311 * enabled at each module, here we just enable the roots of the interrupt
4314 * Note: this function should be called only when the driver manages
4315 * non PF-specific interrupts from the various HW modules. Only one PCI
4316 * function at a time should be doing this.
4318 void t4_intr_enable(struct adapter *adapter)
4321 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4322 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4323 ? G_SOURCEPF(whoami)
4324 : G_T6_SOURCEPF(whoami));
4326 if (chip_id(adapter) <= CHELSIO_T5)
4327 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4329 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4330 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4331 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4332 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4333 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4334 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4335 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4336 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4337 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4338 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4342 * t4_intr_disable - disable interrupts
4343 * @adapter: the adapter whose interrupts should be disabled
4345 * Disable interrupts. We only disable the top-level interrupt
4346 * concentrators. The caller must be a PCI function managing global
4349 void t4_intr_disable(struct adapter *adapter)
4351 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4352 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4353 ? G_SOURCEPF(whoami)
4354 : G_T6_SOURCEPF(whoami));
4356 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4357 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4361 * t4_intr_clear - clear all interrupts
4362 * @adapter: the adapter whose interrupts should be cleared
4364 * Clears all interrupts. The caller must be a PCI function managing
4365 * global interrupts.
4367 void t4_intr_clear(struct adapter *adapter)
4369 static const unsigned int cause_reg[] = {
4370 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4371 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4372 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4373 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4374 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4375 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4377 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4378 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4379 A_MPS_RX_PERR_INT_CAUSE,
4381 MYPF_REG(A_PL_PF_INT_CAUSE),
4388 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4389 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4391 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4392 A_MC_P_INT_CAUSE, 0xffffffff);
4394 if (is_t4(adapter)) {
4395 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4397 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4400 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4402 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4403 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4407 * hash_mac_addr - return the hash value of a MAC address
4408 * @addr: the 48-bit Ethernet MAC address
4410 * Hashes a MAC address according to the hash function used by HW inexact
4411 * (hash) address matching.
4413 static int hash_mac_addr(const u8 *addr)
4415 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4416 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4424 * t4_config_rss_range - configure a portion of the RSS mapping table
4425 * @adapter: the adapter
4426 * @mbox: mbox to use for the FW command
4427 * @viid: virtual interface whose RSS subtable is to be written
4428 * @start: start entry in the table to write
4429 * @n: how many table entries to write
4430 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4431 * @nrspq: number of values in @rspq
4433 * Programs the selected part of the VI's RSS mapping table with the
4434 * provided values. If @nrspq < @n the supplied values are used repeatedly
4435 * until the full table range is populated.
4437 * The caller must ensure the values in @rspq are in the range allowed for
4440 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4441 int start, int n, const u16 *rspq, unsigned int nrspq)
4444 const u16 *rsp = rspq;
4445 const u16 *rsp_end = rspq + nrspq;
4446 struct fw_rss_ind_tbl_cmd cmd;
4448 memset(&cmd, 0, sizeof(cmd));
4449 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4450 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4451 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4452 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4455 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4456 * Queue Identifiers. These Ingress Queue IDs are packed three to
4457 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4461 int nq = min(n, 32);
4463 __be32 *qp = &cmd.iq0_to_iq2;
4466 * Set up the firmware RSS command header to send the next
4467 * "nq" Ingress Queue IDs to the firmware.
4469 cmd.niqid = cpu_to_be16(nq);
4470 cmd.startidx = cpu_to_be16(start);
4473 * "nq" more done for the start of the next loop.
4479 * While there are still Ingress Queue IDs to stuff into the
4480 * current firmware RSS command, retrieve them from the
4481 * Ingress Queue ID array and insert them into the command.
4485 * Grab up to the next 3 Ingress Queue IDs (wrapping
4486 * around the Ingress Queue ID array if necessary) and
4487 * insert them into the firmware RSS command at the
4488 * current 3-tuple position within the commad.
4492 int nqbuf = min(3, nq);
4495 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4496 while (nqbuf && nq_packed < 32) {
4503 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4504 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4505 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4509 * Send this portion of the RRS table update to the firmware;
4510 * bail out on any errors.
4512 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4520 * t4_config_glbl_rss - configure the global RSS mode
4521 * @adapter: the adapter
4522 * @mbox: mbox to use for the FW command
4523 * @mode: global RSS mode
4524 * @flags: mode-specific flags
4526 * Sets the global RSS mode.
4528 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4531 struct fw_rss_glb_config_cmd c;
4533 memset(&c, 0, sizeof(c));
4534 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4535 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4536 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4537 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4538 c.u.manual.mode_pkd =
4539 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4540 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4541 c.u.basicvirtual.mode_pkd =
4542 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4543 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4546 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4550 * t4_config_vi_rss - configure per VI RSS settings
4551 * @adapter: the adapter
4552 * @mbox: mbox to use for the FW command
4555 * @defq: id of the default RSS queue for the VI.
4557 * Configures VI-specific RSS properties.
4559 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4560 unsigned int flags, unsigned int defq)
4562 struct fw_rss_vi_config_cmd c;
4564 memset(&c, 0, sizeof(c));
4565 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4566 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4567 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4568 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4569 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4570 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4571 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4574 /* Read an RSS table row */
4575 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4577 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4578 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4583 * t4_read_rss - read the contents of the RSS mapping table
4584 * @adapter: the adapter
4585 * @map: holds the contents of the RSS mapping table
4587 * Reads the contents of the RSS hash->queue mapping table.
4589 int t4_read_rss(struct adapter *adapter, u16 *map)
4594 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4595 ret = rd_rss_row(adapter, i, &val);
4598 *map++ = G_LKPTBLQUEUE0(val);
4599 *map++ = G_LKPTBLQUEUE1(val);
4605 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4606 * @adap: the adapter
4607 * @vals: where the indirect register values are stored/written
4608 * @nregs: how many indirect registers to read/write
4609 * @start_idx: index of first indirect register to read/write
4610 * @rw: Read (1) or Write (0)
4612 * Access TP PIO registers through LDST
4614 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4615 unsigned int start_index, unsigned int rw)
4618 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4619 struct fw_ldst_cmd c;
4621 for (i = 0 ; i < nregs; i++) {
4622 memset(&c, 0, sizeof(c));
4623 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4625 (rw ? F_FW_CMD_READ :
4627 V_FW_LDST_CMD_ADDRSPACE(cmd));
4628 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4630 c.u.addrval.addr = cpu_to_be32(start_index + i);
4631 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4632 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4635 vals[i] = be32_to_cpu(c.u.addrval.val);
4641 * t4_read_rss_key - read the global RSS key
4642 * @adap: the adapter
4643 * @key: 10-entry array holding the 320-bit RSS key
4645 * Reads the global 320-bit RSS key.
4647 void t4_read_rss_key(struct adapter *adap, u32 *key)
4649 if (t4_use_ldst(adap))
4650 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
4652 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4653 A_TP_RSS_SECRET_KEY0);
4657 * t4_write_rss_key - program one of the RSS keys
4658 * @adap: the adapter
4659 * @key: 10-entry array holding the 320-bit RSS key
4660 * @idx: which RSS key to write
4662 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4663 * 0..15 the corresponding entry in the RSS key table is written,
4664 * otherwise the global RSS key is written.
4666 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4668 u8 rss_key_addr_cnt = 16;
4669 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4672 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4673 * allows access to key addresses 16-63 by using KeyWrAddrX
4674 * as index[5:4](upper 2) into key table
4676 if ((chip_id(adap) > CHELSIO_T5) &&
4677 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4678 rss_key_addr_cnt = 32;
4680 if (t4_use_ldst(adap))
4681 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4683 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4684 A_TP_RSS_SECRET_KEY0);
4686 if (idx >= 0 && idx < rss_key_addr_cnt) {
4687 if (rss_key_addr_cnt > 16)
4688 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4689 V_KEYWRADDRX(idx >> 4) |
4690 V_T6_VFWRADDR(idx) | F_KEYWREN);
4692 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4693 V_KEYWRADDR(idx) | F_KEYWREN);
4698 * t4_read_rss_pf_config - read PF RSS Configuration Table
4699 * @adapter: the adapter
4700 * @index: the entry in the PF RSS table to read
4701 * @valp: where to store the returned value
4703 * Reads the PF RSS Configuration Table at the specified index and returns
4704 * the value found there.
4706 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4709 if (t4_use_ldst(adapter))
4710 t4_fw_tp_pio_rw(adapter, valp, 1,
4711 A_TP_RSS_PF0_CONFIG + index, 1);
4713 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4714 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4718 * t4_write_rss_pf_config - write PF RSS Configuration Table
4719 * @adapter: the adapter
4720 * @index: the entry in the VF RSS table to read
4721 * @val: the value to store
4723 * Writes the PF RSS Configuration Table at the specified index with the
4726 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4729 if (t4_use_ldst(adapter))
4730 t4_fw_tp_pio_rw(adapter, &val, 1,
4731 A_TP_RSS_PF0_CONFIG + index, 0);
4733 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4734 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4738 * t4_read_rss_vf_config - read VF RSS Configuration Table
4739 * @adapter: the adapter
4740 * @index: the entry in the VF RSS table to read
4741 * @vfl: where to store the returned VFL
4742 * @vfh: where to store the returned VFH
4744 * Reads the VF RSS Configuration Table at the specified index and returns
4745 * the (VFL, VFH) values found there.
4747 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4750 u32 vrt, mask, data;
4752 if (chip_id(adapter) <= CHELSIO_T5) {
4753 mask = V_VFWRADDR(M_VFWRADDR);
4754 data = V_VFWRADDR(index);
4756 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4757 data = V_T6_VFWRADDR(index);
4760 * Request that the index'th VF Table values be read into VFL/VFH.
4762 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4763 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4764 vrt |= data | F_VFRDEN;
4765 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4768 * Grab the VFL/VFH values ...
4770 if (t4_use_ldst(adapter)) {
4771 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
4772 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
4774 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4775 vfl, 1, A_TP_RSS_VFL_CONFIG);
4776 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4777 vfh, 1, A_TP_RSS_VFH_CONFIG);
4782 * t4_write_rss_vf_config - write VF RSS Configuration Table
4784 * @adapter: the adapter
4785 * @index: the entry in the VF RSS table to write
4786 * @vfl: the VFL to store
4787 * @vfh: the VFH to store
4789 * Writes the VF RSS Configuration Table at the specified index with the
4790 * specified (VFL, VFH) values.
4792 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
4795 u32 vrt, mask, data;
4797 if (chip_id(adapter) <= CHELSIO_T5) {
4798 mask = V_VFWRADDR(M_VFWRADDR);
4799 data = V_VFWRADDR(index);
4801 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4802 data = V_T6_VFWRADDR(index);
4806 * Load up VFL/VFH with the values to be written ...
4808 if (t4_use_ldst(adapter)) {
4809 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
4810 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
4812 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4813 &vfl, 1, A_TP_RSS_VFL_CONFIG);
4814 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4815 &vfh, 1, A_TP_RSS_VFH_CONFIG);
4819 * Write the VFL/VFH into the VF Table at index'th location.
4821 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4822 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4823 vrt |= data | F_VFRDEN;
4824 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4828 * t4_read_rss_pf_map - read PF RSS Map
4829 * @adapter: the adapter
4831 * Reads the PF RSS Map register and returns its value.
4833 u32 t4_read_rss_pf_map(struct adapter *adapter)
4837 if (t4_use_ldst(adapter))
4838 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
4840 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4841 &pfmap, 1, A_TP_RSS_PF_MAP);
4846 * t4_write_rss_pf_map - write PF RSS Map
4847 * @adapter: the adapter
4848 * @pfmap: PF RSS Map value
4850 * Writes the specified value to the PF RSS Map register.
4852 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
4854 if (t4_use_ldst(adapter))
4855 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
4857 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4858 &pfmap, 1, A_TP_RSS_PF_MAP);
4862 * t4_read_rss_pf_mask - read PF RSS Mask
4863 * @adapter: the adapter
4865 * Reads the PF RSS Mask register and returns its value.
4867 u32 t4_read_rss_pf_mask(struct adapter *adapter)
4871 if (t4_use_ldst(adapter))
4872 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
4874 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4875 &pfmask, 1, A_TP_RSS_PF_MSK);
4880 * t4_write_rss_pf_mask - write PF RSS Mask
4881 * @adapter: the adapter
4882 * @pfmask: PF RSS Mask value
4884 * Writes the specified value to the PF RSS Mask register.
4886 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
4888 if (t4_use_ldst(adapter))
4889 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
4891 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4892 &pfmask, 1, A_TP_RSS_PF_MSK);
4896 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
4897 * @adap: the adapter
4898 * @v4: holds the TCP/IP counter values
4899 * @v6: holds the TCP/IPv6 counter values
4901 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
4902 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
4904 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
4905 struct tp_tcp_stats *v6)
4907 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
4909 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
4910 #define STAT(x) val[STAT_IDX(x)]
4911 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
4914 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4915 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
4916 v4->tcp_out_rsts = STAT(OUT_RST);
4917 v4->tcp_in_segs = STAT64(IN_SEG);
4918 v4->tcp_out_segs = STAT64(OUT_SEG);
4919 v4->tcp_retrans_segs = STAT64(RXT_SEG);
4922 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4923 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
4924 v6->tcp_out_rsts = STAT(OUT_RST);
4925 v6->tcp_in_segs = STAT64(IN_SEG);
4926 v6->tcp_out_segs = STAT64(OUT_SEG);
4927 v6->tcp_retrans_segs = STAT64(RXT_SEG);
4935 * t4_tp_get_err_stats - read TP's error MIB counters
4936 * @adap: the adapter
4937 * @st: holds the counter values
4939 * Returns the values of TP's error counters.
4941 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
4943 int nchan = adap->chip_params->nchan;
4945 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4946 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
4947 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4948 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
4949 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4950 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
4951 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4952 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
4953 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4954 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
4955 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4956 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
4957 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4958 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
4959 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4960 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
4962 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4963 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
4967 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
4968 * @adap: the adapter
4969 * @st: holds the counter values
4971 * Returns the values of TP's proxy counters.
4973 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
4975 int nchan = adap->chip_params->nchan;
4977 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
4978 nchan, A_TP_MIB_TNL_LPBK_0);
4982 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
4983 * @adap: the adapter
4984 * @st: holds the counter values
4986 * Returns the values of TP's CPL counters.
4988 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
4990 int nchan = adap->chip_params->nchan;
4992 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
4993 nchan, A_TP_MIB_CPL_IN_REQ_0);
4994 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
4995 nchan, A_TP_MIB_CPL_OUT_RSP_0);
4999 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5000 * @adap: the adapter
5001 * @st: holds the counter values
5003 * Returns the values of TP's RDMA counters.
5005 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5007 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5008 2, A_TP_MIB_RQE_DFR_PKT);
5012 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5013 * @adap: the adapter
5014 * @idx: the port index
5015 * @st: holds the counter values
5017 * Returns the values of TP's FCoE counters for the selected port.
5019 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5020 struct tp_fcoe_stats *st)
5024 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5025 1, A_TP_MIB_FCOE_DDP_0 + idx);
5026 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5027 1, A_TP_MIB_FCOE_DROP_0 + idx);
5028 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5029 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
5030 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5034 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5035 * @adap: the adapter
5036 * @st: holds the counter values
5038 * Returns the values of TP's counters for non-TCP directly-placed packets.
5040 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5044 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5046 st->frames = val[0];
5048 st->octets = ((u64)val[2] << 32) | val[3];
5052 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5053 * @adap: the adapter
5054 * @mtus: where to store the MTU values
5055 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5057 * Reads the HW path MTU table.
5059 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5064 for (i = 0; i < NMTUS; ++i) {
5065 t4_write_reg(adap, A_TP_MTU_TABLE,
5066 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5067 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5068 mtus[i] = G_MTUVALUE(v);
5070 mtu_log[i] = G_MTUWIDTH(v);
5075 * t4_read_cong_tbl - reads the congestion control table
5076 * @adap: the adapter
5077 * @incr: where to store the alpha values
5079 * Reads the additive increments programmed into the HW congestion
5082 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5084 unsigned int mtu, w;
5086 for (mtu = 0; mtu < NMTUS; ++mtu)
5087 for (w = 0; w < NCCTRL_WIN; ++w) {
5088 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5089 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5090 incr[mtu][w] = (u16)t4_read_reg(adap,
5091 A_TP_CCTRL_TABLE) & 0x1fff;
5096 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5097 * @adap: the adapter
5098 * @addr: the indirect TP register address
5099 * @mask: specifies the field within the register to modify
5100 * @val: new value for the field
5102 * Sets a field of an indirect TP register to the given value.
5104 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5105 unsigned int mask, unsigned int val)
5107 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5108 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5109 t4_write_reg(adap, A_TP_PIO_DATA, val);
5113 * init_cong_ctrl - initialize congestion control parameters
5114 * @a: the alpha values for congestion control
5115 * @b: the beta values for congestion control
5117 * Initialize the congestion control parameters.
5119 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5121 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5146 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5149 b[13] = b[14] = b[15] = b[16] = 3;
5150 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5151 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5156 /* The minimum additive increment value for the congestion control table */
5157 #define CC_MIN_INCR 2U
5160 * t4_load_mtus - write the MTU and congestion control HW tables
5161 * @adap: the adapter
5162 * @mtus: the values for the MTU table
5163 * @alpha: the values for the congestion control alpha parameter
5164 * @beta: the values for the congestion control beta parameter
5166 * Write the HW MTU table with the supplied MTUs and the high-speed
5167 * congestion control table with the supplied alpha, beta, and MTUs.
5168 * We write the two tables together because the additive increments
5169 * depend on the MTUs.
5171 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5172 const unsigned short *alpha, const unsigned short *beta)
5174 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5175 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5176 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5177 28672, 40960, 57344, 81920, 114688, 163840, 229376
5182 for (i = 0; i < NMTUS; ++i) {
5183 unsigned int mtu = mtus[i];
5184 unsigned int log2 = fls(mtu);
5186 if (!(mtu & ((1 << log2) >> 2))) /* round */
5188 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5189 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5191 for (w = 0; w < NCCTRL_WIN; ++w) {
5194 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5197 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5198 (w << 16) | (beta[w] << 13) | inc);
5204 * t4_set_pace_tbl - set the pace table
5205 * @adap: the adapter
5206 * @pace_vals: the pace values in microseconds
5207 * @start: index of the first entry in the HW pace table to set
5208 * @n: how many entries to set
5210 * Sets (a subset of the) HW pace table.
5212 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5213 unsigned int start, unsigned int n)
5215 unsigned int vals[NTX_SCHED], i;
5216 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5221 /* convert values from us to dack ticks, rounding to closest value */
5222 for (i = 0; i < n; i++, pace_vals++) {
5223 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5224 if (vals[i] > 0x7ff)
5226 if (*pace_vals && vals[i] == 0)
5229 for (i = 0; i < n; i++, start++)
5230 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5235 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5236 * @adap: the adapter
5237 * @kbps: target rate in Kbps
5238 * @sched: the scheduler index
5240 * Configure a Tx HW scheduler for the target rate.
5242 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5244 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5245 unsigned int clk = adap->params.vpd.cclk * 1000;
5246 unsigned int selected_cpt = 0, selected_bpt = 0;
5249 kbps *= 125; /* -> bytes */
5250 for (cpt = 1; cpt <= 255; cpt++) {
5252 bpt = (kbps + tps / 2) / tps;
5253 if (bpt > 0 && bpt <= 255) {
5255 delta = v >= kbps ? v - kbps : kbps - v;
5256 if (delta < mindelta) {
5261 } else if (selected_cpt)
5267 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5268 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5269 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5271 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5273 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5274 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5279 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5280 * @adap: the adapter
5281 * @sched: the scheduler index
5282 * @ipg: the interpacket delay in tenths of nanoseconds
5284 * Set the interpacket delay for a HW packet rate scheduler.
5286 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5288 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5290 /* convert ipg to nearest number of core clocks */
5291 ipg *= core_ticks_per_usec(adap);
5292 ipg = (ipg + 5000) / 10000;
5293 if (ipg > M_TXTIMERSEPQ0)
5296 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5297 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5299 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5301 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5302 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5303 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5308 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5309 * clocks. The formula is
5311 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5313 * which is equivalent to
5315 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5317 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5319 u64 v = bytes256 * adap->params.vpd.cclk;
5321 return v * 62 + v / 2;
5325 * t4_get_chan_txrate - get the current per channel Tx rates
5326 * @adap: the adapter
5327 * @nic_rate: rates for NIC traffic
5328 * @ofld_rate: rates for offloaded traffic
5330 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5333 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5337 v = t4_read_reg(adap, A_TP_TX_TRATE);
5338 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5339 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5340 if (adap->chip_params->nchan > 2) {
5341 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5342 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5345 v = t4_read_reg(adap, A_TP_TX_ORATE);
5346 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5347 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5348 if (adap->chip_params->nchan > 2) {
5349 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5350 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5355 * t4_set_trace_filter - configure one of the tracing filters
5356 * @adap: the adapter
5357 * @tp: the desired trace filter parameters
5358 * @idx: which filter to configure
5359 * @enable: whether to enable or disable the filter
5361 * Configures one of the tracing filters available in HW. If @tp is %NULL
5362 * it indicates that the filter is already written in the register and it
5363 * just needs to be enabled or disabled.
5365 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5366 int idx, int enable)
5368 int i, ofst = idx * 4;
5369 u32 data_reg, mask_reg, cfg;
5370 u32 multitrc = F_TRCMULTIFILTER;
5371 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5373 if (idx < 0 || idx >= NTRACE)
5376 if (tp == NULL || !enable) {
5377 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5383 * TODO - After T4 data book is updated, specify the exact
5386 * See T4 data book - MPS section for a complete description
5387 * of the below if..else handling of A_MPS_TRC_CFG register
5390 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5391 if (cfg & F_TRCMULTIFILTER) {
5393 * If multiple tracers are enabled, then maximum
5394 * capture size is 2.5KB (FIFO size of a single channel)
5395 * minus 2 flits for CPL_TRACE_PKT header.
5397 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5401 * If multiple tracers are disabled, to avoid deadlocks
5402 * maximum packet capture size of 9600 bytes is recommended.
5403 * Also in this mode, only trace0 can be enabled and running.
5406 if (tp->snap_len > 9600 || idx)
5410 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5411 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5412 tp->min_len > M_TFMINPKTSIZE)
5415 /* stop the tracer we'll be changing */
5416 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5418 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5419 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5420 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5422 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5423 t4_write_reg(adap, data_reg, tp->data[i]);
5424 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5426 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5427 V_TFCAPTUREMAX(tp->snap_len) |
5428 V_TFMINPKTSIZE(tp->min_len));
5429 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5430 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5432 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5433 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5439 * t4_get_trace_filter - query one of the tracing filters
5440 * @adap: the adapter
5441 * @tp: the current trace filter parameters
5442 * @idx: which trace filter to query
5443 * @enabled: non-zero if the filter is enabled
5445 * Returns the current settings of one of the HW tracing filters.
5447 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5451 int i, ofst = idx * 4;
5452 u32 data_reg, mask_reg;
5454 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5455 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5458 *enabled = !!(ctla & F_TFEN);
5459 tp->port = G_TFPORT(ctla);
5460 tp->invert = !!(ctla & F_TFINVERTMATCH);
5462 *enabled = !!(ctla & F_T5_TFEN);
5463 tp->port = G_T5_TFPORT(ctla);
5464 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5466 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5467 tp->min_len = G_TFMINPKTSIZE(ctlb);
5468 tp->skip_ofst = G_TFOFFSET(ctla);
5469 tp->skip_len = G_TFLENGTH(ctla);
5471 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5472 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5473 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5475 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5476 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5477 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5482 * t4_pmtx_get_stats - returns the HW stats from PMTX
5483 * @adap: the adapter
5484 * @cnt: where to store the count statistics
5485 * @cycles: where to store the cycle statistics
5487 * Returns performance statistics from PMTX.
5489 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5494 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5495 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5496 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5498 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5500 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5501 A_PM_TX_DBG_DATA, data, 2,
5502 A_PM_TX_DBG_STAT_MSB);
5503 cycles[i] = (((u64)data[0] << 32) | data[1]);
5509 * t4_pmrx_get_stats - returns the HW stats from PMRX
5510 * @adap: the adapter
5511 * @cnt: where to store the count statistics
5512 * @cycles: where to store the cycle statistics
5514 * Returns performance statistics from PMRX.
5516 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5521 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5522 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5523 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5525 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5527 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5528 A_PM_RX_DBG_DATA, data, 2,
5529 A_PM_RX_DBG_STAT_MSB);
5530 cycles[i] = (((u64)data[0] << 32) | data[1]);
5536 * t4_get_mps_bg_map - return the buffer groups associated with a port
5537 * @adap: the adapter
5538 * @idx: the port index
5540 * Returns a bitmap indicating which MPS buffer groups are associated
5541 * with the given port. Bit i is set if buffer group i is used by the
5544 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5546 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5549 return idx == 0 ? 0xf : 0;
5550 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5551 return idx < 2 ? (3 << (2 * idx)) : 0;
5556 * t4_get_port_type_description - return Port Type string description
5557 * @port_type: firmware Port Type enumeration
5559 const char *t4_get_port_type_description(enum fw_port_type port_type)
5561 static const char *const port_type_description[] = {
5580 if (port_type < ARRAY_SIZE(port_type_description))
5581 return port_type_description[port_type];
5586 * t4_get_port_stats_offset - collect port stats relative to a previous
5588 * @adap: The adapter
5590 * @stats: Current stats to fill
5591 * @offset: Previous stats snapshot
5593 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5594 struct port_stats *stats,
5595 struct port_stats *offset)
5600 t4_get_port_stats(adap, idx, stats);
5601 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5602 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5608 * t4_get_port_stats - collect port statistics
5609 * @adap: the adapter
5610 * @idx: the port index
5611 * @p: the stats structure to fill
5613 * Collect statistics related to the given port from HW.
5615 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5617 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5619 #define GET_STAT(name) \
5620 t4_read_reg64(adap, \
5621 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5622 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5623 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5625 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5626 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5627 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5628 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5629 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5630 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5631 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5632 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5633 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5634 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5635 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5636 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5637 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5638 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5639 p->tx_drop = GET_STAT(TX_PORT_DROP);
5640 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5641 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5642 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5643 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5644 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5645 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5646 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5647 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
5649 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5650 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5651 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5652 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5653 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5654 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5655 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5656 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5657 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5658 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5659 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5660 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5661 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5662 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5663 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5664 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5665 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5666 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5667 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5668 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5669 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5670 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5671 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5672 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5673 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5674 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5675 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
5677 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5678 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5679 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5680 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5681 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5682 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5683 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5684 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5691 * t4_get_lb_stats - collect loopback port statistics
5692 * @adap: the adapter
5693 * @idx: the loopback port index
5694 * @p: the stats structure to fill
5696 * Return HW statistics for the given loopback port.
5698 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5700 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5702 #define GET_STAT(name) \
5703 t4_read_reg64(adap, \
5705 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5706 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5707 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5709 p->octets = GET_STAT(BYTES);
5710 p->frames = GET_STAT(FRAMES);
5711 p->bcast_frames = GET_STAT(BCAST);
5712 p->mcast_frames = GET_STAT(MCAST);
5713 p->ucast_frames = GET_STAT(UCAST);
5714 p->error_frames = GET_STAT(ERROR);
5716 p->frames_64 = GET_STAT(64B);
5717 p->frames_65_127 = GET_STAT(65B_127B);
5718 p->frames_128_255 = GET_STAT(128B_255B);
5719 p->frames_256_511 = GET_STAT(256B_511B);
5720 p->frames_512_1023 = GET_STAT(512B_1023B);
5721 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5722 p->frames_1519_max = GET_STAT(1519B_MAX);
5723 p->drop = GET_STAT(DROP_FRAMES);
5725 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5726 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5727 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5728 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5729 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5730 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5731 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5732 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5739 * t4_wol_magic_enable - enable/disable magic packet WoL
5740 * @adap: the adapter
5741 * @port: the physical port index
5742 * @addr: MAC address expected in magic packets, %NULL to disable
5744 * Enables/disables magic packet wake-on-LAN for the selected port.
5746 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5749 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
5752 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5753 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5754 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5756 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5757 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5758 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5762 t4_write_reg(adap, mag_id_reg_l,
5763 (addr[2] << 24) | (addr[3] << 16) |
5764 (addr[4] << 8) | addr[5]);
5765 t4_write_reg(adap, mag_id_reg_h,
5766 (addr[0] << 8) | addr[1]);
5768 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5769 V_MAGICEN(addr != NULL));
5773 * t4_wol_pat_enable - enable/disable pattern-based WoL
5774 * @adap: the adapter
5775 * @port: the physical port index
5776 * @map: bitmap of which HW pattern filters to set
5777 * @mask0: byte mask for bytes 0-63 of a packet
5778 * @mask1: byte mask for bytes 64-127 of a packet
5779 * @crc: Ethernet CRC for selected bytes
5780 * @enable: enable/disable switch
5782 * Sets the pattern filters indicated in @map to mask out the bytes
5783 * specified in @mask0/@mask1 in received packets and compare the CRC of
5784 * the resulting packet against @crc. If @enable is %true pattern-based
5785 * WoL is enabled, otherwise disabled.
5787 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
5788 u64 mask0, u64 mask1, unsigned int crc, bool enable)
5794 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5796 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5799 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
5805 #define EPIO_REG(name) \
5806 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
5807 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
5809 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
5810 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
5811 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
5813 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
5817 /* write byte masks */
5818 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
5819 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
5820 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
5821 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
5825 t4_write_reg(adap, EPIO_REG(DATA0), crc);
5826 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
5827 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
5828 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
5833 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
5837 /* t4_mk_filtdelwr - create a delete filter WR
5838 * @ftid: the filter ID
5839 * @wr: the filter work request to populate
5840 * @qid: ingress queue to receive the delete notification
5842 * Creates a filter work request to delete the supplied filter. If @qid is
5843 * negative the delete notification is suppressed.
5845 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
5847 memset(wr, 0, sizeof(*wr));
5848 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
5849 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
5850 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
5851 V_FW_FILTER_WR_NOREPLY(qid < 0));
5852 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
5854 wr->rx_chan_rx_rpl_iq =
5855 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
5858 #define INIT_CMD(var, cmd, rd_wr) do { \
5859 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
5860 F_FW_CMD_REQUEST | \
5861 F_FW_CMD_##rd_wr); \
5862 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
5865 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
5869 struct fw_ldst_cmd c;
5871 memset(&c, 0, sizeof(c));
5872 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
5873 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5877 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5878 c.u.addrval.addr = cpu_to_be32(addr);
5879 c.u.addrval.val = cpu_to_be32(val);
5881 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5885 * t4_mdio_rd - read a PHY register through MDIO
5886 * @adap: the adapter
5887 * @mbox: mailbox to use for the FW command
5888 * @phy_addr: the PHY address
5889 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5890 * @reg: the register to read
5891 * @valp: where to store the value
5893 * Issues a FW command through the given mailbox to read a PHY register.
5895 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5896 unsigned int mmd, unsigned int reg, unsigned int *valp)
5900 struct fw_ldst_cmd c;
5902 memset(&c, 0, sizeof(c));
5903 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
5904 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5905 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5907 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5908 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5909 V_FW_LDST_CMD_MMD(mmd));
5910 c.u.mdio.raddr = cpu_to_be16(reg);
5912 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5914 *valp = be16_to_cpu(c.u.mdio.rval);
5919 * t4_mdio_wr - write a PHY register through MDIO
5920 * @adap: the adapter
5921 * @mbox: mailbox to use for the FW command
5922 * @phy_addr: the PHY address
5923 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5924 * @reg: the register to write
5925 * @valp: value to write
5927 * Issues a FW command through the given mailbox to write a PHY register.
5929 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5930 unsigned int mmd, unsigned int reg, unsigned int val)
5933 struct fw_ldst_cmd c;
5935 memset(&c, 0, sizeof(c));
5936 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
5937 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5938 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5940 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5941 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5942 V_FW_LDST_CMD_MMD(mmd));
5943 c.u.mdio.raddr = cpu_to_be16(reg);
5944 c.u.mdio.rval = cpu_to_be16(val);
5946 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5951 * t4_sge_decode_idma_state - decode the idma state
5952 * @adap: the adapter
5953 * @state: the state idma is stuck in
5955 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
5957 static const char * const t4_decode[] = {
5959 "IDMA_PUSH_MORE_CPL_FIFO",
5960 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5962 "IDMA_PHYSADDR_SEND_PCIEHDR",
5963 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5964 "IDMA_PHYSADDR_SEND_PAYLOAD",
5965 "IDMA_SEND_FIFO_TO_IMSG",
5966 "IDMA_FL_REQ_DATA_FL_PREP",
5967 "IDMA_FL_REQ_DATA_FL",
5969 "IDMA_FL_H_REQ_HEADER_FL",
5970 "IDMA_FL_H_SEND_PCIEHDR",
5971 "IDMA_FL_H_PUSH_CPL_FIFO",
5972 "IDMA_FL_H_SEND_CPL",
5973 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5974 "IDMA_FL_H_SEND_IP_HDR",
5975 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5976 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5977 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5978 "IDMA_FL_D_SEND_PCIEHDR",
5979 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5980 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5981 "IDMA_FL_SEND_PCIEHDR",
5982 "IDMA_FL_PUSH_CPL_FIFO",
5984 "IDMA_FL_SEND_PAYLOAD_FIRST",
5985 "IDMA_FL_SEND_PAYLOAD",
5986 "IDMA_FL_REQ_NEXT_DATA_FL",
5987 "IDMA_FL_SEND_NEXT_PCIEHDR",
5988 "IDMA_FL_SEND_PADDING",
5989 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5990 "IDMA_FL_SEND_FIFO_TO_IMSG",
5991 "IDMA_FL_REQ_DATAFL_DONE",
5992 "IDMA_FL_REQ_HEADERFL_DONE",
5994 static const char * const t5_decode[] = {
5997 "IDMA_PUSH_MORE_CPL_FIFO",
5998 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5999 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6000 "IDMA_PHYSADDR_SEND_PCIEHDR",
6001 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6002 "IDMA_PHYSADDR_SEND_PAYLOAD",
6003 "IDMA_SEND_FIFO_TO_IMSG",
6004 "IDMA_FL_REQ_DATA_FL",
6006 "IDMA_FL_DROP_SEND_INC",
6007 "IDMA_FL_H_REQ_HEADER_FL",
6008 "IDMA_FL_H_SEND_PCIEHDR",
6009 "IDMA_FL_H_PUSH_CPL_FIFO",
6010 "IDMA_FL_H_SEND_CPL",
6011 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6012 "IDMA_FL_H_SEND_IP_HDR",
6013 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6014 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6015 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6016 "IDMA_FL_D_SEND_PCIEHDR",
6017 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6018 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6019 "IDMA_FL_SEND_PCIEHDR",
6020 "IDMA_FL_PUSH_CPL_FIFO",
6022 "IDMA_FL_SEND_PAYLOAD_FIRST",
6023 "IDMA_FL_SEND_PAYLOAD",
6024 "IDMA_FL_REQ_NEXT_DATA_FL",
6025 "IDMA_FL_SEND_NEXT_PCIEHDR",
6026 "IDMA_FL_SEND_PADDING",
6027 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6029 static const char * const t6_decode[] = {
6031 "IDMA_PUSH_MORE_CPL_FIFO",
6032 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6033 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6034 "IDMA_PHYSADDR_SEND_PCIEHDR",
6035 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6036 "IDMA_PHYSADDR_SEND_PAYLOAD",
6037 "IDMA_FL_REQ_DATA_FL",
6039 "IDMA_FL_DROP_SEND_INC",
6040 "IDMA_FL_H_REQ_HEADER_FL",
6041 "IDMA_FL_H_SEND_PCIEHDR",
6042 "IDMA_FL_H_PUSH_CPL_FIFO",
6043 "IDMA_FL_H_SEND_CPL",
6044 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6045 "IDMA_FL_H_SEND_IP_HDR",
6046 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6047 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6048 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6049 "IDMA_FL_D_SEND_PCIEHDR",
6050 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6051 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6052 "IDMA_FL_SEND_PCIEHDR",
6053 "IDMA_FL_PUSH_CPL_FIFO",
6055 "IDMA_FL_SEND_PAYLOAD_FIRST",
6056 "IDMA_FL_SEND_PAYLOAD",
6057 "IDMA_FL_REQ_NEXT_DATA_FL",
6058 "IDMA_FL_SEND_NEXT_PCIEHDR",
6059 "IDMA_FL_SEND_PADDING",
6060 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6062 static const u32 sge_regs[] = {
6063 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6064 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6065 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6067 const char * const *sge_idma_decode;
6068 int sge_idma_decode_nstates;
6070 unsigned int chip_version = chip_id(adapter);
6072 /* Select the right set of decode strings to dump depending on the
6073 * adapter chip type.
6075 switch (chip_version) {
6077 sge_idma_decode = (const char * const *)t4_decode;
6078 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6082 sge_idma_decode = (const char * const *)t5_decode;
6083 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6087 sge_idma_decode = (const char * const *)t6_decode;
6088 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6092 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6096 if (state < sge_idma_decode_nstates)
6097 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6099 CH_WARN(adapter, "idma state %d unknown\n", state);
6101 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6102 CH_WARN(adapter, "SGE register %#x value %#x\n",
6103 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6107 * t4_sge_ctxt_flush - flush the SGE context cache
6108 * @adap: the adapter
6109 * @mbox: mailbox to use for the FW command
6111 * Issues a FW command through the given mailbox to flush the
6112 * SGE context cache.
6114 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6118 struct fw_ldst_cmd c;
6120 memset(&c, 0, sizeof(c));
6121 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6122 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6123 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6125 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6126 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6128 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6133 * t4_fw_hello - establish communication with FW
6134 * @adap: the adapter
6135 * @mbox: mailbox to use for the FW command
6136 * @evt_mbox: mailbox to receive async FW events
6137 * @master: specifies the caller's willingness to be the device master
6138 * @state: returns the current device state (if non-NULL)
6140 * Issues a command to establish communication with FW. Returns either
6141 * an error (negative integer) or the mailbox of the Master PF.
6143 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6144 enum dev_master master, enum dev_state *state)
6147 struct fw_hello_cmd c;
6149 unsigned int master_mbox;
6150 int retries = FW_CMD_HELLO_RETRIES;
6153 memset(&c, 0, sizeof(c));
6154 INIT_CMD(c, HELLO, WRITE);
6155 c.err_to_clearinit = cpu_to_be32(
6156 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6157 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6158 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6159 mbox : M_FW_HELLO_CMD_MBMASTER) |
6160 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6161 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6162 F_FW_HELLO_CMD_CLEARINIT);
6165 * Issue the HELLO command to the firmware. If it's not successful
6166 * but indicates that we got a "busy" or "timeout" condition, retry
6167 * the HELLO until we exhaust our retry limit. If we do exceed our
6168 * retry limit, check to see if the firmware left us any error
6169 * information and report that if so ...
6171 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6172 if (ret != FW_SUCCESS) {
6173 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6175 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6176 t4_report_fw_error(adap);
6180 v = be32_to_cpu(c.err_to_clearinit);
6181 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6183 if (v & F_FW_HELLO_CMD_ERR)
6184 *state = DEV_STATE_ERR;
6185 else if (v & F_FW_HELLO_CMD_INIT)
6186 *state = DEV_STATE_INIT;
6188 *state = DEV_STATE_UNINIT;
6192 * If we're not the Master PF then we need to wait around for the
6193 * Master PF Driver to finish setting up the adapter.
6195 * Note that we also do this wait if we're a non-Master-capable PF and
6196 * there is no current Master PF; a Master PF may show up momentarily
6197 * and we wouldn't want to fail pointlessly. (This can happen when an
6198 * OS loads lots of different drivers rapidly at the same time). In
6199 * this case, the Master PF returned by the firmware will be
6200 * M_PCIE_FW_MASTER so the test below will work ...
6202 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6203 master_mbox != mbox) {
6204 int waiting = FW_CMD_HELLO_TIMEOUT;
6207 * Wait for the firmware to either indicate an error or
6208 * initialized state. If we see either of these we bail out
6209 * and report the issue to the caller. If we exhaust the
6210 * "hello timeout" and we haven't exhausted our retries, try
6211 * again. Otherwise bail with a timeout error.
6220 * If neither Error nor Initialialized are indicated
6221 * by the firmware keep waiting till we exhaust our
6222 * timeout ... and then retry if we haven't exhausted
6225 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6226 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6237 * We either have an Error or Initialized condition
6238 * report errors preferentially.
6241 if (pcie_fw & F_PCIE_FW_ERR)
6242 *state = DEV_STATE_ERR;
6243 else if (pcie_fw & F_PCIE_FW_INIT)
6244 *state = DEV_STATE_INIT;
6248 * If we arrived before a Master PF was selected and
6249 * there's not a valid Master PF, grab its identity
6252 if (master_mbox == M_PCIE_FW_MASTER &&
6253 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6254 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6263 * t4_fw_bye - end communication with FW
6264 * @adap: the adapter
6265 * @mbox: mailbox to use for the FW command
6267 * Issues a command to terminate communication with FW.
6269 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6271 struct fw_bye_cmd c;
6273 memset(&c, 0, sizeof(c));
6274 INIT_CMD(c, BYE, WRITE);
6275 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6279 * t4_fw_reset - issue a reset to FW
6280 * @adap: the adapter
6281 * @mbox: mailbox to use for the FW command
6282 * @reset: specifies the type of reset to perform
6284 * Issues a reset command of the specified type to FW.
6286 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6288 struct fw_reset_cmd c;
6290 memset(&c, 0, sizeof(c));
6291 INIT_CMD(c, RESET, WRITE);
6292 c.val = cpu_to_be32(reset);
6293 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6297 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6298 * @adap: the adapter
6299 * @mbox: mailbox to use for the FW RESET command (if desired)
6300 * @force: force uP into RESET even if FW RESET command fails
6302 * Issues a RESET command to firmware (if desired) with a HALT indication
6303 * and then puts the microprocessor into RESET state. The RESET command
6304 * will only be issued if a legitimate mailbox is provided (mbox <=
6305 * M_PCIE_FW_MASTER).
6307 * This is generally used in order for the host to safely manipulate the
6308 * adapter without fear of conflicting with whatever the firmware might
6309 * be doing. The only way out of this state is to RESTART the firmware
6312 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6317 * If a legitimate mailbox is provided, issue a RESET command
6318 * with a HALT indication.
6320 if (mbox <= M_PCIE_FW_MASTER) {
6321 struct fw_reset_cmd c;
6323 memset(&c, 0, sizeof(c));
6324 INIT_CMD(c, RESET, WRITE);
6325 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6326 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6327 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6331 * Normally we won't complete the operation if the firmware RESET
6332 * command fails but if our caller insists we'll go ahead and put the
6333 * uP into RESET. This can be useful if the firmware is hung or even
6334 * missing ... We'll have to take the risk of putting the uP into
6335 * RESET without the cooperation of firmware in that case.
6337 * We also force the firmware's HALT flag to be on in case we bypassed
6338 * the firmware RESET command above or we're dealing with old firmware
6339 * which doesn't have the HALT capability. This will serve as a flag
6340 * for the incoming firmware to know that it's coming out of a HALT
6341 * rather than a RESET ... if it's new enough to understand that ...
6343 if (ret == 0 || force) {
6344 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6345 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6350 * And we always return the result of the firmware RESET command
6351 * even when we force the uP into RESET ...
6357 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6358 * @adap: the adapter
6359 * @reset: if we want to do a RESET to restart things
6361 * Restart firmware previously halted by t4_fw_halt(). On successful
6362 * return the previous PF Master remains as the new PF Master and there
6363 * is no need to issue a new HELLO command, etc.
6365 * We do this in two ways:
6367 * 1. If we're dealing with newer firmware we'll simply want to take
6368 * the chip's microprocessor out of RESET. This will cause the
6369 * firmware to start up from its start vector. And then we'll loop
6370 * until the firmware indicates it's started again (PCIE_FW.HALT
6371 * reset to 0) or we timeout.
6373 * 2. If we're dealing with older firmware then we'll need to RESET
6374 * the chip since older firmware won't recognize the PCIE_FW.HALT
6375 * flag and automatically RESET itself on startup.
6377 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6381 * Since we're directing the RESET instead of the firmware
6382 * doing it automatically, we need to clear the PCIE_FW.HALT
6385 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6388 * If we've been given a valid mailbox, first try to get the
6389 * firmware to do the RESET. If that works, great and we can
6390 * return success. Otherwise, if we haven't been given a
6391 * valid mailbox or the RESET command failed, fall back to
6392 * hitting the chip with a hammer.
6394 if (mbox <= M_PCIE_FW_MASTER) {
6395 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6397 if (t4_fw_reset(adap, mbox,
6398 F_PIORST | F_PIORSTMODE) == 0)
6402 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6407 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6408 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6409 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6420 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6421 * @adap: the adapter
6422 * @mbox: mailbox to use for the FW RESET command (if desired)
6423 * @fw_data: the firmware image to write
6425 * @force: force upgrade even if firmware doesn't cooperate
6427 * Perform all of the steps necessary for upgrading an adapter's
6428 * firmware image. Normally this requires the cooperation of the
6429 * existing firmware in order to halt all existing activities
6430 * but if an invalid mailbox token is passed in we skip that step
6431 * (though we'll still put the adapter microprocessor into RESET in
6434 * On successful return the new firmware will have been loaded and
6435 * the adapter will have been fully RESET losing all previous setup
6436 * state. On unsuccessful return the adapter may be completely hosed ...
6437 * positive errno indicates that the adapter is ~probably~ intact, a
6438 * negative errno indicates that things are looking bad ...
6440 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6441 const u8 *fw_data, unsigned int size, int force)
6443 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6444 unsigned int bootstrap =
6445 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6448 if (!t4_fw_matches_chip(adap, fw_hdr))
6452 ret = t4_fw_halt(adap, mbox, force);
6453 if (ret < 0 && !force)
6457 ret = t4_load_fw(adap, fw_data, size);
6458 if (ret < 0 || bootstrap)
6462 * Older versions of the firmware don't understand the new
6463 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6464 * restart. So for newly loaded older firmware we'll have to do the
6465 * RESET for it so it starts up on a clean slate. We can tell if
6466 * the newly loaded firmware will handle this right by checking
6467 * its header flags to see if it advertises the capability.
6469 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6470 return t4_fw_restart(adap, mbox, reset);
6474 * t4_fw_initialize - ask FW to initialize the device
6475 * @adap: the adapter
6476 * @mbox: mailbox to use for the FW command
6478 * Issues a command to FW to partially initialize the device. This
6479 * performs initialization that generally doesn't depend on user input.
6481 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6483 struct fw_initialize_cmd c;
6485 memset(&c, 0, sizeof(c));
6486 INIT_CMD(c, INITIALIZE, WRITE);
6487 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6491 * t4_query_params_rw - query FW or device parameters
6492 * @adap: the adapter
6493 * @mbox: mailbox to use for the FW command
6496 * @nparams: the number of parameters
6497 * @params: the parameter names
6498 * @val: the parameter values
6499 * @rw: Write and read flag
6501 * Reads the value of FW or device parameters. Up to 7 parameters can be
6504 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6505 unsigned int vf, unsigned int nparams, const u32 *params,
6509 struct fw_params_cmd c;
6510 __be32 *p = &c.param[0].mnem;
6515 memset(&c, 0, sizeof(c));
6516 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6517 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6518 V_FW_PARAMS_CMD_PFN(pf) |
6519 V_FW_PARAMS_CMD_VFN(vf));
6520 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6522 for (i = 0; i < nparams; i++) {
6523 *p++ = cpu_to_be32(*params++);
6525 *p = cpu_to_be32(*(val + i));
6529 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6531 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6532 *val++ = be32_to_cpu(*p);
6536 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6537 unsigned int vf, unsigned int nparams, const u32 *params,
6540 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6544 * t4_set_params_timeout - sets FW or device parameters
6545 * @adap: the adapter
6546 * @mbox: mailbox to use for the FW command
6549 * @nparams: the number of parameters
6550 * @params: the parameter names
6551 * @val: the parameter values
6552 * @timeout: the timeout time
6554 * Sets the value of FW or device parameters. Up to 7 parameters can be
6555 * specified at once.
6557 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6558 unsigned int pf, unsigned int vf,
6559 unsigned int nparams, const u32 *params,
6560 const u32 *val, int timeout)
6562 struct fw_params_cmd c;
6563 __be32 *p = &c.param[0].mnem;
6568 memset(&c, 0, sizeof(c));
6569 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6570 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6571 V_FW_PARAMS_CMD_PFN(pf) |
6572 V_FW_PARAMS_CMD_VFN(vf));
6573 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6576 *p++ = cpu_to_be32(*params++);
6577 *p++ = cpu_to_be32(*val++);
6580 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6584 * t4_set_params - sets FW or device parameters
6585 * @adap: the adapter
6586 * @mbox: mailbox to use for the FW command
6589 * @nparams: the number of parameters
6590 * @params: the parameter names
6591 * @val: the parameter values
6593 * Sets the value of FW or device parameters. Up to 7 parameters can be
6594 * specified at once.
6596 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6597 unsigned int vf, unsigned int nparams, const u32 *params,
6600 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6601 FW_CMD_MAX_TIMEOUT);
6605 * t4_cfg_pfvf - configure PF/VF resource limits
6606 * @adap: the adapter
6607 * @mbox: mailbox to use for the FW command
6608 * @pf: the PF being configured
6609 * @vf: the VF being configured
6610 * @txq: the max number of egress queues
6611 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6612 * @rxqi: the max number of interrupt-capable ingress queues
6613 * @rxq: the max number of interruptless ingress queues
6614 * @tc: the PCI traffic class
6615 * @vi: the max number of virtual interfaces
6616 * @cmask: the channel access rights mask for the PF/VF
6617 * @pmask: the port access rights mask for the PF/VF
6618 * @nexact: the maximum number of exact MPS filters
6619 * @rcaps: read capabilities
6620 * @wxcaps: write/execute capabilities
6622 * Configures resource limits and capabilities for a physical or virtual
6625 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6626 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6627 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6628 unsigned int vi, unsigned int cmask, unsigned int pmask,
6629 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6631 struct fw_pfvf_cmd c;
6633 memset(&c, 0, sizeof(c));
6634 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6635 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6636 V_FW_PFVF_CMD_VFN(vf));
6637 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6638 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6639 V_FW_PFVF_CMD_NIQ(rxq));
6640 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6641 V_FW_PFVF_CMD_PMASK(pmask) |
6642 V_FW_PFVF_CMD_NEQ(txq));
6643 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6644 V_FW_PFVF_CMD_NVI(vi) |
6645 V_FW_PFVF_CMD_NEXACTF(nexact));
6646 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6647 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6648 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6649 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6653 * t4_alloc_vi_func - allocate a virtual interface
6654 * @adap: the adapter
6655 * @mbox: mailbox to use for the FW command
6656 * @port: physical port associated with the VI
6657 * @pf: the PF owning the VI
6658 * @vf: the VF owning the VI
6659 * @nmac: number of MAC addresses needed (1 to 5)
6660 * @mac: the MAC addresses of the VI
6661 * @rss_size: size of RSS table slice associated with this VI
6662 * @portfunc: which Port Application Function MAC Address is desired
6663 * @idstype: Intrusion Detection Type
6665 * Allocates a virtual interface for the given physical port. If @mac is
6666 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6667 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6668 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6669 * stored consecutively so the space needed is @nmac * 6 bytes.
6670 * Returns a negative error number or the non-negative VI id.
6672 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6673 unsigned int port, unsigned int pf, unsigned int vf,
6674 unsigned int nmac, u8 *mac, u16 *rss_size,
6675 unsigned int portfunc, unsigned int idstype)
6680 memset(&c, 0, sizeof(c));
6681 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6682 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6683 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6684 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6685 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6686 V_FW_VI_CMD_FUNC(portfunc));
6687 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6690 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6692 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6697 memcpy(mac, c.mac, sizeof(c.mac));
6700 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6702 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6704 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6706 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6710 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6711 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6715 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6716 * @adap: the adapter
6717 * @mbox: mailbox to use for the FW command
6718 * @port: physical port associated with the VI
6719 * @pf: the PF owning the VI
6720 * @vf: the VF owning the VI
6721 * @nmac: number of MAC addresses needed (1 to 5)
6722 * @mac: the MAC addresses of the VI
6723 * @rss_size: size of RSS table slice associated with this VI
6725 * backwards compatible and convieniance routine to allocate a Virtual
6726 * Interface with a Ethernet Port Application Function and Intrustion
6727 * Detection System disabled.
6729 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6730 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6733 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6738 * t4_free_vi - free a virtual interface
6739 * @adap: the adapter
6740 * @mbox: mailbox to use for the FW command
6741 * @pf: the PF owning the VI
6742 * @vf: the VF owning the VI
6743 * @viid: virtual interface identifiler
6745 * Free a previously allocated virtual interface.
6747 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6748 unsigned int vf, unsigned int viid)
6752 memset(&c, 0, sizeof(c));
6753 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6756 V_FW_VI_CMD_PFN(pf) |
6757 V_FW_VI_CMD_VFN(vf));
6758 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6759 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6761 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6765 * t4_set_rxmode - set Rx properties of a virtual interface
6766 * @adap: the adapter
6767 * @mbox: mailbox to use for the FW command
6769 * @mtu: the new MTU or -1
6770 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6771 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6772 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
6773 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
6774 * @sleep_ok: if true we may sleep while awaiting command completion
6776 * Sets Rx properties of a virtual interface.
6778 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
6779 int mtu, int promisc, int all_multi, int bcast, int vlanex,
6782 struct fw_vi_rxmode_cmd c;
6784 /* convert to FW values */
6786 mtu = M_FW_VI_RXMODE_CMD_MTU;
6788 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
6790 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
6792 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
6794 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
6796 memset(&c, 0, sizeof(c));
6797 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
6798 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6799 V_FW_VI_RXMODE_CMD_VIID(viid));
6800 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6802 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
6803 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
6804 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
6805 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
6806 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
6807 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6811 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
6812 * @adap: the adapter
6813 * @mbox: mailbox to use for the FW command
6815 * @free: if true any existing filters for this VI id are first removed
6816 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
6817 * @addr: the MAC address(es)
6818 * @idx: where to store the index of each allocated filter
6819 * @hash: pointer to hash address filter bitmap
6820 * @sleep_ok: call is allowed to sleep
6822 * Allocates an exact-match filter for each of the supplied addresses and
6823 * sets it to the corresponding address. If @idx is not %NULL it should
6824 * have at least @naddr entries, each of which will be set to the index of
6825 * the filter allocated for the corresponding MAC address. If a filter
6826 * could not be allocated for an address its index is set to 0xffff.
6827 * If @hash is not %NULL addresses that fail to allocate an exact filter
6828 * are hashed and update the hash filter bitmap pointed at by @hash.
6830 * Returns a negative error number or the number of filters allocated.
6832 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
6833 unsigned int viid, bool free, unsigned int naddr,
6834 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
6836 int offset, ret = 0;
6837 struct fw_vi_mac_cmd c;
6838 unsigned int nfilters = 0;
6839 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
6840 unsigned int rem = naddr;
6842 if (naddr > max_naddr)
6845 for (offset = 0; offset < naddr ; /**/) {
6846 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
6848 : ARRAY_SIZE(c.u.exact));
6849 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6850 u.exact[fw_naddr]), 16);
6851 struct fw_vi_mac_exact *p;
6854 memset(&c, 0, sizeof(c));
6855 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6858 V_FW_CMD_EXEC(free) |
6859 V_FW_VI_MAC_CMD_VIID(viid));
6860 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
6861 V_FW_CMD_LEN16(len16));
6863 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6865 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
6866 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
6867 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
6871 * It's okay if we run out of space in our MAC address arena.
6872 * Some of the addresses we submit may get stored so we need
6873 * to run through the reply to see what the results were ...
6875 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6876 if (ret && ret != -FW_ENOMEM)
6879 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6880 u16 index = G_FW_VI_MAC_CMD_IDX(
6881 be16_to_cpu(p->valid_to_idx));
6884 idx[offset+i] = (index >= max_naddr
6887 if (index < max_naddr)
6890 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
6898 if (ret == 0 || ret == -FW_ENOMEM)
6904 * t4_change_mac - modifies the exact-match filter for a MAC address
6905 * @adap: the adapter
6906 * @mbox: mailbox to use for the FW command
6908 * @idx: index of existing filter for old value of MAC address, or -1
6909 * @addr: the new MAC address value
6910 * @persist: whether a new MAC allocation should be persistent
6911 * @add_smt: if true also add the address to the HW SMT
6913 * Modifies an exact-match filter and sets it to the new MAC address if
6914 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
6915 * latter case the address is added persistently if @persist is %true.
6917 * Note that in general it is not possible to modify the value of a given
6918 * filter so the generic way to modify an address filter is to free the one
6919 * being used by the old address value and allocate a new filter for the
6920 * new address value.
6922 * Returns a negative error number or the index of the filter with the new
6923 * MAC value. Note that this index may differ from @idx.
6925 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
6926 int idx, const u8 *addr, bool persist, bool add_smt)
6929 struct fw_vi_mac_cmd c;
6930 struct fw_vi_mac_exact *p = c.u.exact;
6931 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
6933 if (idx < 0) /* new allocation */
6934 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
6935 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
6937 memset(&c, 0, sizeof(c));
6938 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6939 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6940 V_FW_VI_MAC_CMD_VIID(viid));
6941 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
6942 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
6943 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
6944 V_FW_VI_MAC_CMD_IDX(idx));
6945 memcpy(p->macaddr, addr, sizeof(p->macaddr));
6947 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6949 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
6950 if (ret >= max_mac_addr)
6957 * t4_set_addr_hash - program the MAC inexact-match hash filter
6958 * @adap: the adapter
6959 * @mbox: mailbox to use for the FW command
6961 * @ucast: whether the hash filter should also match unicast addresses
6962 * @vec: the value to be written to the hash filter
6963 * @sleep_ok: call is allowed to sleep
6965 * Sets the 64-bit inexact-match hash filter for a virtual interface.
6967 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
6968 bool ucast, u64 vec, bool sleep_ok)
6970 struct fw_vi_mac_cmd c;
6973 memset(&c, 0, sizeof(c));
6974 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6975 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6976 V_FW_VI_ENABLE_CMD_VIID(viid));
6977 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
6978 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
6979 c.freemacs_to_len16 = cpu_to_be32(val);
6980 c.u.hash.hashvec = cpu_to_be64(vec);
6981 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6985 * t4_enable_vi_params - enable/disable a virtual interface
6986 * @adap: the adapter
6987 * @mbox: mailbox to use for the FW command
6989 * @rx_en: 1=enable Rx, 0=disable Rx
6990 * @tx_en: 1=enable Tx, 0=disable Tx
6991 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
6993 * Enables/disables a virtual interface. Note that setting DCB Enable
6994 * only makes sense when enabling a Virtual Interface ...
6996 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
6997 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
6999 struct fw_vi_enable_cmd c;
7001 memset(&c, 0, sizeof(c));
7002 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7003 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7004 V_FW_VI_ENABLE_CMD_VIID(viid));
7005 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7006 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7007 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7009 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7013 * t4_enable_vi - enable/disable a virtual interface
7014 * @adap: the adapter
7015 * @mbox: mailbox to use for the FW command
7017 * @rx_en: 1=enable Rx, 0=disable Rx
7018 * @tx_en: 1=enable Tx, 0=disable Tx
7020 * Enables/disables a virtual interface. Note that setting DCB Enable
7021 * only makes sense when enabling a Virtual Interface ...
7023 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7024 bool rx_en, bool tx_en)
7026 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7030 * t4_identify_port - identify a VI's port by blinking its LED
7031 * @adap: the adapter
7032 * @mbox: mailbox to use for the FW command
7034 * @nblinks: how many times to blink LED at 2.5 Hz
7036 * Identifies a VI's port by blinking its LED.
7038 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7039 unsigned int nblinks)
7041 struct fw_vi_enable_cmd c;
7043 memset(&c, 0, sizeof(c));
7044 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7045 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7046 V_FW_VI_ENABLE_CMD_VIID(viid));
7047 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7048 c.blinkdur = cpu_to_be16(nblinks);
7049 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7053 * t4_iq_stop - stop an ingress queue and its FLs
7054 * @adap: the adapter
7055 * @mbox: mailbox to use for the FW command
7056 * @pf: the PF owning the queues
7057 * @vf: the VF owning the queues
7058 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7059 * @iqid: ingress queue id
7060 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7061 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7063 * Stops an ingress queue and its associated FLs, if any. This causes
7064 * any current or future data/messages destined for these queues to be
7067 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7068 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7069 unsigned int fl0id, unsigned int fl1id)
7073 memset(&c, 0, sizeof(c));
7074 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7075 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7076 V_FW_IQ_CMD_VFN(vf));
7077 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7078 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7079 c.iqid = cpu_to_be16(iqid);
7080 c.fl0id = cpu_to_be16(fl0id);
7081 c.fl1id = cpu_to_be16(fl1id);
7082 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7086 * t4_iq_free - free an ingress queue and its FLs
7087 * @adap: the adapter
7088 * @mbox: mailbox to use for the FW command
7089 * @pf: the PF owning the queues
7090 * @vf: the VF owning the queues
7091 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7092 * @iqid: ingress queue id
7093 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7094 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7096 * Frees an ingress queue and its associated FLs, if any.
7098 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7099 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7100 unsigned int fl0id, unsigned int fl1id)
7104 memset(&c, 0, sizeof(c));
7105 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7106 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7107 V_FW_IQ_CMD_VFN(vf));
7108 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7109 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7110 c.iqid = cpu_to_be16(iqid);
7111 c.fl0id = cpu_to_be16(fl0id);
7112 c.fl1id = cpu_to_be16(fl1id);
7113 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7117 * t4_eth_eq_free - free an Ethernet egress queue
7118 * @adap: the adapter
7119 * @mbox: mailbox to use for the FW command
7120 * @pf: the PF owning the queue
7121 * @vf: the VF owning the queue
7122 * @eqid: egress queue id
7124 * Frees an Ethernet egress queue.
7126 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7127 unsigned int vf, unsigned int eqid)
7129 struct fw_eq_eth_cmd c;
7131 memset(&c, 0, sizeof(c));
7132 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7133 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7134 V_FW_EQ_ETH_CMD_PFN(pf) |
7135 V_FW_EQ_ETH_CMD_VFN(vf));
7136 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7137 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7138 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7142 * t4_ctrl_eq_free - free a control egress queue
7143 * @adap: the adapter
7144 * @mbox: mailbox to use for the FW command
7145 * @pf: the PF owning the queue
7146 * @vf: the VF owning the queue
7147 * @eqid: egress queue id
7149 * Frees a control egress queue.
7151 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7152 unsigned int vf, unsigned int eqid)
7154 struct fw_eq_ctrl_cmd c;
7156 memset(&c, 0, sizeof(c));
7157 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7158 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7159 V_FW_EQ_CTRL_CMD_PFN(pf) |
7160 V_FW_EQ_CTRL_CMD_VFN(vf));
7161 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7162 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7163 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7167 * t4_ofld_eq_free - free an offload egress queue
7168 * @adap: the adapter
7169 * @mbox: mailbox to use for the FW command
7170 * @pf: the PF owning the queue
7171 * @vf: the VF owning the queue
7172 * @eqid: egress queue id
7174 * Frees a control egress queue.
7176 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7177 unsigned int vf, unsigned int eqid)
7179 struct fw_eq_ofld_cmd c;
7181 memset(&c, 0, sizeof(c));
7182 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7183 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7184 V_FW_EQ_OFLD_CMD_PFN(pf) |
7185 V_FW_EQ_OFLD_CMD_VFN(vf));
7186 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7187 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7188 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7192 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7193 * @link_down_rc: Link Down Reason Code
7195 * Returns a string representation of the Link Down Reason Code.
7197 const char *t4_link_down_rc_str(unsigned char link_down_rc)
7199 static const char *reason[] = {
7202 "Auto-negotiation Failure",
7204 "Insufficient Airflow",
7205 "Unable To Determine Reason",
7206 "No RX Signal Detected",
7210 if (link_down_rc >= ARRAY_SIZE(reason))
7211 return "Bad Reason Code";
7213 return reason[link_down_rc];
7217 * t4_handle_fw_rpl - process a FW reply message
7218 * @adap: the adapter
7219 * @rpl: start of the FW message
7221 * Processes a FW message, such as link state change messages.
7223 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7225 u8 opcode = *(const u8 *)rpl;
7226 const struct fw_port_cmd *p = (const void *)rpl;
7227 unsigned int action =
7228 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7230 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7231 /* link/module state change message */
7232 int speed = 0, fc = 0, i;
7233 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7234 struct port_info *pi = NULL;
7235 struct link_config *lc;
7236 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7237 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7238 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
7240 if (stat & F_FW_PORT_CMD_RXPAUSE)
7242 if (stat & F_FW_PORT_CMD_TXPAUSE)
7244 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7246 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7248 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7250 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7253 for_each_port(adap, i) {
7254 pi = adap2pinfo(adap, i);
7255 if (pi->tx_chan == chan)
7260 if (mod != pi->mod_type) {
7262 t4_os_portmod_changed(adap, i);
7264 if (link_ok != lc->link_ok || speed != lc->speed ||
7265 fc != lc->fc) { /* something changed */
7268 if (!link_ok && lc->link_ok)
7269 reason = G_FW_PORT_CMD_LINKDNRC(stat);
7273 lc->link_ok = link_ok;
7276 lc->supported = be16_to_cpu(p->u.info.pcap);
7277 t4_os_link_changed(adap, i, link_ok, reason);
7280 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7287 * get_pci_mode - determine a card's PCI mode
7288 * @adapter: the adapter
7289 * @p: where to store the PCI settings
7291 * Determines a card's PCI mode and associated parameters, such as speed
7294 static void get_pci_mode(struct adapter *adapter,
7295 struct pci_params *p)
7300 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7302 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7303 p->speed = val & PCI_EXP_LNKSTA_CLS;
7304 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7309 * init_link_config - initialize a link's SW state
7310 * @lc: structure holding the link state
7311 * @caps: link capabilities
7313 * Initializes the SW state maintained for each link, including the link's
7314 * capabilities and default speed/flow-control/autonegotiation settings.
7316 static void init_link_config(struct link_config *lc, unsigned int caps)
7318 lc->supported = caps;
7319 lc->requested_speed = 0;
7321 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7322 if (lc->supported & FW_PORT_CAP_ANEG) {
7323 lc->advertising = lc->supported & ADVERT_MASK;
7324 lc->autoneg = AUTONEG_ENABLE;
7325 lc->requested_fc |= PAUSE_AUTONEG;
7327 lc->advertising = 0;
7328 lc->autoneg = AUTONEG_DISABLE;
7333 u32 vendor_and_model_id;
7337 int t4_get_flash_params(struct adapter *adapter)
7340 * Table for non-Numonix supported flash parts. Numonix parts are left
7341 * to the preexisting well-tested code. All flash parts have 64KB
7344 static struct flash_desc supported_flash[] = {
7345 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7351 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7353 ret = sf1_read(adapter, 3, 0, 1, &info);
7354 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7358 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7359 if (supported_flash[ret].vendor_and_model_id == info) {
7360 adapter->params.sf_size = supported_flash[ret].size_mb;
7361 adapter->params.sf_nsec =
7362 adapter->params.sf_size / SF_SEC_SIZE;
7366 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7368 info >>= 16; /* log2 of size */
7369 if (info >= 0x14 && info < 0x18)
7370 adapter->params.sf_nsec = 1 << (info - 16);
7371 else if (info == 0x18)
7372 adapter->params.sf_nsec = 64;
7375 adapter->params.sf_size = 1 << info;
7378 * We should ~probably~ reject adapters with FLASHes which are too
7379 * small but we have some legacy FPGAs with small FLASHes that we'd
7380 * still like to use. So instead we emit a scary message ...
7382 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7383 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7384 adapter->params.sf_size, FLASH_MIN_SIZE);
7389 static void set_pcie_completion_timeout(struct adapter *adapter,
7395 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7397 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7400 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7404 static const struct chip_params *get_chip_params(int chipid)
7406 static const struct chip_params chip_params[] = {
7410 .pm_stats_cnt = PM_NSTATS,
7411 .cng_ch_bits_log = 2,
7413 .cim_num_obq = CIM_NUM_OBQ,
7414 .mps_rplc_size = 128,
7416 .sge_fl_db = F_DBPRIO,
7417 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7422 .pm_stats_cnt = PM_NSTATS,
7423 .cng_ch_bits_log = 2,
7425 .cim_num_obq = CIM_NUM_OBQ_T5,
7426 .mps_rplc_size = 128,
7428 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7429 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7434 .pm_stats_cnt = T6_PM_NSTATS,
7435 .cng_ch_bits_log = 3,
7437 .cim_num_obq = CIM_NUM_OBQ_T5,
7438 .mps_rplc_size = 256,
7441 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7445 chipid -= CHELSIO_T4;
7446 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7449 return &chip_params[chipid];
7453 * t4_prep_adapter - prepare SW and HW for operation
7454 * @adapter: the adapter
7455 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7457 * Initialize adapter SW state for the various HW modules, set initial
7458 * values for some adapter tunables, take PHYs out of reset, and
7459 * initialize the MDIO interface.
7461 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7467 get_pci_mode(adapter, &adapter->params.pci);
7469 pl_rev = t4_read_reg(adapter, A_PL_REV);
7470 adapter->params.chipid = G_CHIPID(pl_rev);
7471 adapter->params.rev = G_REV(pl_rev);
7472 if (adapter->params.chipid == 0) {
7473 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7474 adapter->params.chipid = CHELSIO_T4;
7476 /* T4A1 chip is not supported */
7477 if (adapter->params.rev == 1) {
7478 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7483 adapter->chip_params = get_chip_params(chip_id(adapter));
7484 if (adapter->chip_params == NULL)
7487 adapter->params.pci.vpd_cap_addr =
7488 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7490 ret = t4_get_flash_params(adapter);
7494 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7498 /* Cards with real ASICs have the chipid in the PCIe device id */
7499 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7500 if (device_id >> 12 == chip_id(adapter))
7501 adapter->params.cim_la_size = CIMLA_SIZE;
7504 adapter->params.fpga = 1;
7505 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7508 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7511 * Default port and clock for debugging in case we can't reach FW.
7513 adapter->params.nports = 1;
7514 adapter->params.portvec = 1;
7515 adapter->params.vpd.cclk = 50000;
7517 /* Set pci completion timeout value to 4 seconds. */
7518 set_pcie_completion_timeout(adapter, 0xd);
7523 * t4_shutdown_adapter - shut down adapter, host & wire
7524 * @adapter: the adapter
7526 * Perform an emergency shutdown of the adapter and stop it from
7527 * continuing any further communication on the ports or DMA to the
7528 * host. This is typically used when the adapter and/or firmware
7529 * have crashed and we want to prevent any further accidental
7530 * communication with the rest of the world. This will also force
7531 * the port Link Status to go down -- if register writes work --
7532 * which should help our peers figure out that we're down.
7534 int t4_shutdown_adapter(struct adapter *adapter)
7538 t4_intr_disable(adapter);
7539 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
7540 for_each_port(adapter, port) {
7541 u32 a_port_cfg = PORT_REG(port,
7546 t4_write_reg(adapter, a_port_cfg,
7547 t4_read_reg(adapter, a_port_cfg)
7548 & ~V_SIGNAL_DET(1));
7550 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7556 * t4_init_devlog_params - initialize adapter->params.devlog
7557 * @adap: the adapter
7558 * @fw_attach: whether we can talk to the firmware
7560 * Initialize various fields of the adapter's Firmware Device Log
7561 * Parameters structure.
7563 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7565 struct devlog_params *dparams = &adap->params.devlog;
7567 unsigned int devlog_meminfo;
7568 struct fw_devlog_cmd devlog_cmd;
7571 /* If we're dealing with newer firmware, the Device Log Paramerters
7572 * are stored in a designated register which allows us to access the
7573 * Device Log even if we can't talk to the firmware.
7576 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7578 unsigned int nentries, nentries128;
7580 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
7581 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
7583 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7584 nentries = (nentries128 + 1) * 128;
7585 dparams->size = nentries * sizeof(struct fw_devlog_e);
7591 * For any failing returns ...
7593 memset(dparams, 0, sizeof *dparams);
7596 * If we can't talk to the firmware, there's really nothing we can do
7602 /* Otherwise, ask the firmware for it's Device Log Parameters.
7604 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7605 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7606 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7607 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7608 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7614 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7615 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7616 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7617 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7623 * t4_init_sge_params - initialize adap->params.sge
7624 * @adapter: the adapter
7626 * Initialize various fields of the adapter's SGE Parameters structure.
7628 int t4_init_sge_params(struct adapter *adapter)
7631 struct sge_params *sp = &adapter->params.sge;
7633 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7634 sp->counter_val[0] = G_THRESHOLD_0(r);
7635 sp->counter_val[1] = G_THRESHOLD_1(r);
7636 sp->counter_val[2] = G_THRESHOLD_2(r);
7637 sp->counter_val[3] = G_THRESHOLD_3(r);
7639 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7640 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7641 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7642 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7643 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7644 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7645 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7646 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7647 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
7649 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7650 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7652 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7654 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7656 /* egress queues: log2 of # of doorbells per BAR2 page */
7657 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7658 r >>= S_QUEUESPERPAGEPF0 +
7659 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7660 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7662 /* ingress queues: log2 of # of doorbells per BAR2 page */
7663 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7664 r >>= S_QUEUESPERPAGEPF0 +
7665 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7666 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
7668 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7669 r >>= S_HOSTPAGESIZEPF0 +
7670 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7671 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7673 r = t4_read_reg(adapter, A_SGE_CONTROL);
7674 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7675 sp->fl_pktshift = G_PKTSHIFT(r);
7676 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
7678 sp->pack_boundary = sp->pad_boundary;
7680 r = t4_read_reg(adapter, A_SGE_CONTROL2);
7681 if (G_INGPACKBOUNDARY(r) == 0)
7682 sp->pack_boundary = 16;
7684 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
7691 * Read and cache the adapter's compressed filter mode and ingress config.
7693 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7695 struct tp_params *tpp = &adap->params.tp;
7697 if (t4_use_ldst(adap)) {
7698 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7699 A_TP_VLAN_PRI_MAP, 1);
7700 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7701 A_TP_INGRESS_CONFIG, 1);
7703 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7704 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7705 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7706 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7710 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7711 * shift positions of several elements of the Compressed Filter Tuple
7712 * for this adapter which we need frequently ...
7714 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
7715 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
7716 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
7717 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
7718 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
7719 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
7720 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
7721 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
7722 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
7723 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
7726 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7727 * represents the presense of an Outer VLAN instead of a VNIC ID.
7729 if ((tpp->ingress_config & F_VNIC) == 0)
7730 tpp->vnic_shift = -1;
7734 * t4_init_tp_params - initialize adap->params.tp
7735 * @adap: the adapter
7737 * Initialize various fields of the adapter's TP Parameters structure.
7739 int t4_init_tp_params(struct adapter *adap)
7743 struct tp_params *tpp = &adap->params.tp;
7745 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7746 tpp->tre = G_TIMERRESOLUTION(v);
7747 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7749 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7750 for (chan = 0; chan < MAX_NCHAN; chan++)
7751 tpp->tx_modq[chan] = chan;
7753 read_filter_mode_and_ingress_config(adap);
7756 * For T6, cache the adapter's compressed error vector
7757 * and passing outer header info for encapsulated packets.
7759 if (chip_id(adap) > CHELSIO_T5) {
7760 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
7761 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
7768 * t4_filter_field_shift - calculate filter field shift
7769 * @adap: the adapter
7770 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7772 * Return the shift position of a filter field within the Compressed
7773 * Filter Tuple. The filter field is specified via its selection bit
7774 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
7776 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7778 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7782 if ((filter_mode & filter_sel) == 0)
7785 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
7786 switch (filter_mode & sel) {
7788 field_shift += W_FT_FCOE;
7791 field_shift += W_FT_PORT;
7794 field_shift += W_FT_VNIC_ID;
7797 field_shift += W_FT_VLAN;
7800 field_shift += W_FT_TOS;
7803 field_shift += W_FT_PROTOCOL;
7806 field_shift += W_FT_ETHERTYPE;
7809 field_shift += W_FT_MACMATCH;
7812 field_shift += W_FT_MPSHITTYPE;
7814 case F_FRAGMENTATION:
7815 field_shift += W_FT_FRAGMENTATION;
7822 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
7826 struct fw_port_cmd c;
7828 struct port_info *p = adap2pinfo(adap, port_id);
7831 memset(&c, 0, sizeof(c));
7833 for (i = 0, j = -1; i <= p->port_id; i++) {
7836 } while ((adap->params.portvec & (1 << j)) == 0);
7839 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
7840 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7841 V_FW_PORT_CMD_PORTID(j));
7842 c.action_to_len16 = htonl(
7843 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7845 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7849 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
7853 p->vi[0].viid = ret;
7855 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
7857 p->vi[0].rss_size = rss_size;
7858 t4_os_set_hw_addr(adap, p->port_id, addr);
7860 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
7861 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
7862 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
7863 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
7864 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
7866 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
7868 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7869 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
7870 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
7871 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
7873 p->vi[0].rss_base = 0xffff;
7875 /* MPASS((val >> 16) == rss_size); */
7876 p->vi[0].rss_base = val & 0xffff;
7883 * t4_read_cimq_cfg - read CIM queue configuration
7884 * @adap: the adapter
7885 * @base: holds the queue base addresses in bytes
7886 * @size: holds the queue sizes in bytes
7887 * @thres: holds the queue full thresholds in bytes
7889 * Returns the current configuration of the CIM queues, starting with
7890 * the IBQs, then the OBQs.
7892 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
7895 int cim_num_obq = adap->chip_params->cim_num_obq;
7897 for (i = 0; i < CIM_NUM_IBQ; i++) {
7898 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
7900 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7901 /* value is in 256-byte units */
7902 *base++ = G_CIMQBASE(v) * 256;
7903 *size++ = G_CIMQSIZE(v) * 256;
7904 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
7906 for (i = 0; i < cim_num_obq; i++) {
7907 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7909 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7910 /* value is in 256-byte units */
7911 *base++ = G_CIMQBASE(v) * 256;
7912 *size++ = G_CIMQSIZE(v) * 256;
7917 * t4_read_cim_ibq - read the contents of a CIM inbound queue
7918 * @adap: the adapter
7919 * @qid: the queue index
7920 * @data: where to store the queue contents
7921 * @n: capacity of @data in 32-bit words
7923 * Reads the contents of the selected CIM queue starting at address 0 up
7924 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7925 * error and the number of 32-bit words actually read on success.
7927 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7929 int i, err, attempts;
7931 const unsigned int nwords = CIM_IBQ_SIZE * 4;
7933 if (qid > 5 || (n & 3))
7936 addr = qid * nwords;
7940 /* It might take 3-10ms before the IBQ debug read access is allowed.
7941 * Wait for 1 Sec with a delay of 1 usec.
7945 for (i = 0; i < n; i++, addr++) {
7946 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
7948 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
7952 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
7954 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
7959 * t4_read_cim_obq - read the contents of a CIM outbound queue
7960 * @adap: the adapter
7961 * @qid: the queue index
7962 * @data: where to store the queue contents
7963 * @n: capacity of @data in 32-bit words
7965 * Reads the contents of the selected CIM queue starting at address 0 up
7966 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7967 * error and the number of 32-bit words actually read on success.
7969 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7972 unsigned int addr, v, nwords;
7973 int cim_num_obq = adap->chip_params->cim_num_obq;
7975 if ((qid > (cim_num_obq - 1)) || (n & 3))
7978 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7979 V_QUENUMSELECT(qid));
7980 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7982 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
7983 nwords = G_CIMQSIZE(v) * 64; /* same */
7987 for (i = 0; i < n; i++, addr++) {
7988 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
7990 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
7994 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
7996 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
8002 CIM_CTL_BASE = 0x2000,
8003 CIM_PBT_ADDR_BASE = 0x2800,
8004 CIM_PBT_LRF_BASE = 0x3000,
8005 CIM_PBT_DATA_BASE = 0x3800
8009 * t4_cim_read - read a block from CIM internal address space
8010 * @adap: the adapter
8011 * @addr: the start address within the CIM address space
8012 * @n: number of words to read
8013 * @valp: where to store the result
8015 * Reads a block of 4-byte words from the CIM intenal address space.
8017 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8022 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8025 for ( ; !ret && n--; addr += 4) {
8026 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8027 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8030 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8036 * t4_cim_write - write a block into CIM internal address space
8037 * @adap: the adapter
8038 * @addr: the start address within the CIM address space
8039 * @n: number of words to write
8040 * @valp: set of values to write
8042 * Writes a block of 4-byte words into the CIM intenal address space.
8044 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8045 const unsigned int *valp)
8049 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8052 for ( ; !ret && n--; addr += 4) {
8053 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8054 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8055 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8061 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8064 return t4_cim_write(adap, addr, 1, &val);
8068 * t4_cim_ctl_read - read a block from CIM control region
8069 * @adap: the adapter
8070 * @addr: the start address within the CIM control region
8071 * @n: number of words to read
8072 * @valp: where to store the result
8074 * Reads a block of 4-byte words from the CIM control region.
8076 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8079 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8083 * t4_cim_read_la - read CIM LA capture buffer
8084 * @adap: the adapter
8085 * @la_buf: where to store the LA data
8086 * @wrptr: the HW write pointer within the capture buffer
8088 * Reads the contents of the CIM LA buffer with the most recent entry at
8089 * the end of the returned data and with the entry at @wrptr first.
8090 * We try to leave the LA in the running state we find it in.
8092 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8095 unsigned int cfg, val, idx;
8097 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8101 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8102 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8107 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8111 idx = G_UPDBGLAWRPTR(val);
8115 for (i = 0; i < adap->params.cim_la_size; i++) {
8116 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8117 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8120 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8123 if (val & F_UPDBGLARDEN) {
8127 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8131 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8132 idx = (idx + 1) & M_UPDBGLARDPTR;
8134 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8135 * identify the 32-bit portion of the full 312-bit data
8138 while ((idx & 0xf) > 9)
8139 idx = (idx + 1) % M_UPDBGLARDPTR;
8142 if (cfg & F_UPDBGLAEN) {
8143 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8144 cfg & ~F_UPDBGLARDEN);
8152 * t4_tp_read_la - read TP LA capture buffer
8153 * @adap: the adapter
8154 * @la_buf: where to store the LA data
8155 * @wrptr: the HW write pointer within the capture buffer
8157 * Reads the contents of the TP LA buffer with the most recent entry at
8158 * the end of the returned data and with the entry at @wrptr first.
8159 * We leave the LA in the running state we find it in.
8161 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8163 bool last_incomplete;
8164 unsigned int i, cfg, val, idx;
8166 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8167 if (cfg & F_DBGLAENABLE) /* freeze LA */
8168 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8169 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8171 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8172 idx = G_DBGLAWPTR(val);
8173 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8174 if (last_incomplete)
8175 idx = (idx + 1) & M_DBGLARPTR;
8180 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8181 val |= adap->params.tp.la_mask;
8183 for (i = 0; i < TPLA_SIZE; i++) {
8184 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8185 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8186 idx = (idx + 1) & M_DBGLARPTR;
8189 /* Wipe out last entry if it isn't valid */
8190 if (last_incomplete)
8191 la_buf[TPLA_SIZE - 1] = ~0ULL;
8193 if (cfg & F_DBGLAENABLE) /* restore running state */
8194 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8195 cfg | adap->params.tp.la_mask);
8199 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8200 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8201 * state for more than the Warning Threshold then we'll issue a warning about
8202 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8203 * appears to be hung every Warning Repeat second till the situation clears.
8204 * If the situation clears, we'll note that as well.
8206 #define SGE_IDMA_WARN_THRESH 1
8207 #define SGE_IDMA_WARN_REPEAT 300
8210 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8211 * @adapter: the adapter
8212 * @idma: the adapter IDMA Monitor state
8214 * Initialize the state of an SGE Ingress DMA Monitor.
8216 void t4_idma_monitor_init(struct adapter *adapter,
8217 struct sge_idma_monitor_state *idma)
8219 /* Initialize the state variables for detecting an SGE Ingress DMA
8220 * hang. The SGE has internal counters which count up on each clock
8221 * tick whenever the SGE finds its Ingress DMA State Engines in the
8222 * same state they were on the previous clock tick. The clock used is
8223 * the Core Clock so we have a limit on the maximum "time" they can
8224 * record; typically a very small number of seconds. For instance,
8225 * with a 600MHz Core Clock, we can only count up to a bit more than
8226 * 7s. So we'll synthesize a larger counter in order to not run the
8227 * risk of having the "timers" overflow and give us the flexibility to
8228 * maintain a Hung SGE State Machine of our own which operates across
8229 * a longer time frame.
8231 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8232 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8236 * t4_idma_monitor - monitor SGE Ingress DMA state
8237 * @adapter: the adapter
8238 * @idma: the adapter IDMA Monitor state
8239 * @hz: number of ticks/second
8240 * @ticks: number of ticks since the last IDMA Monitor call
8242 void t4_idma_monitor(struct adapter *adapter,
8243 struct sge_idma_monitor_state *idma,
8246 int i, idma_same_state_cnt[2];
8248 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8249 * are counters inside the SGE which count up on each clock when the
8250 * SGE finds its Ingress DMA State Engines in the same states they
8251 * were in the previous clock. The counters will peg out at
8252 * 0xffffffff without wrapping around so once they pass the 1s
8253 * threshold they'll stay above that till the IDMA state changes.
8255 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8256 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8257 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8259 for (i = 0; i < 2; i++) {
8260 u32 debug0, debug11;
8262 /* If the Ingress DMA Same State Counter ("timer") is less
8263 * than 1s, then we can reset our synthesized Stall Timer and
8264 * continue. If we have previously emitted warnings about a
8265 * potential stalled Ingress Queue, issue a note indicating
8266 * that the Ingress Queue has resumed forward progress.
8268 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8269 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8270 CH_WARN(adapter, "SGE idma%d, queue %u, "
8271 "resumed after %d seconds\n",
8272 i, idma->idma_qid[i],
8273 idma->idma_stalled[i]/hz);
8274 idma->idma_stalled[i] = 0;
8278 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8279 * domain. The first time we get here it'll be because we
8280 * passed the 1s Threshold; each additional time it'll be
8281 * because the RX Timer Callback is being fired on its regular
8284 * If the stall is below our Potential Hung Ingress Queue
8285 * Warning Threshold, continue.
8287 if (idma->idma_stalled[i] == 0) {
8288 idma->idma_stalled[i] = hz;
8289 idma->idma_warn[i] = 0;
8291 idma->idma_stalled[i] += ticks;
8292 idma->idma_warn[i] -= ticks;
8295 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8298 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8300 if (idma->idma_warn[i] > 0)
8302 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8304 /* Read and save the SGE IDMA State and Queue ID information.
8305 * We do this every time in case it changes across time ...
8306 * can't be too careful ...
8308 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8309 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8310 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8312 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8313 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8314 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8316 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8317 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8318 i, idma->idma_qid[i], idma->idma_state[i],
8319 idma->idma_stalled[i]/hz,
8321 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8326 * t4_read_pace_tbl - read the pace table
8327 * @adap: the adapter
8328 * @pace_vals: holds the returned values
8330 * Returns the values of TP's pace table in microseconds.
8332 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8336 for (i = 0; i < NTX_SCHED; i++) {
8337 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8338 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8339 pace_vals[i] = dack_ticks_to_usec(adap, v);
8344 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8345 * @adap: the adapter
8346 * @sched: the scheduler index
8347 * @kbps: the byte rate in Kbps
8348 * @ipg: the interpacket delay in tenths of nanoseconds
8350 * Return the current configuration of a HW Tx scheduler.
8352 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8355 unsigned int v, addr, bpt, cpt;
8358 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8359 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8360 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8363 bpt = (v >> 8) & 0xff;
8366 *kbps = 0; /* scheduler disabled */
8368 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8369 *kbps = (v * bpt) / 125;
8373 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8374 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8375 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8379 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8384 * t4_load_cfg - download config file
8385 * @adap: the adapter
8386 * @cfg_data: the cfg text file to write
8387 * @size: text file size
8389 * Write the supplied config text file to the card's serial flash.
8391 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8393 int ret, i, n, cfg_addr;
8395 unsigned int flash_cfg_start_sec;
8396 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8398 cfg_addr = t4_flash_cfg_addr(adap);
8403 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8405 if (size > FLASH_CFG_MAX_SIZE) {
8406 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8407 FLASH_CFG_MAX_SIZE);
8411 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8413 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8414 flash_cfg_start_sec + i - 1);
8416 * If size == 0 then we're simply erasing the FLASH sectors associated
8417 * with the on-adapter Firmware Configuration File.
8419 if (ret || size == 0)
8422 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8423 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8424 if ( (size - i) < SF_PAGE_SIZE)
8428 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8432 addr += SF_PAGE_SIZE;
8433 cfg_data += SF_PAGE_SIZE;
8438 CH_ERR(adap, "config file %s failed %d\n",
8439 (size == 0 ? "clear" : "download"), ret);
8444 * t5_fw_init_extern_mem - initialize the external memory
8445 * @adap: the adapter
8447 * Initializes the external memory on T5.
8449 int t5_fw_init_extern_mem(struct adapter *adap)
8451 u32 params[1], val[1];
8457 val[0] = 0xff; /* Initialize all MCs */
8458 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8459 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8460 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8461 FW_CMD_MAX_TIMEOUT);
8466 /* BIOS boot headers */
8467 typedef struct pci_expansion_rom_header {
8468 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8469 u8 reserved[22]; /* Reserved per processor Architecture data */
8470 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8471 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8473 /* Legacy PCI Expansion ROM Header */
8474 typedef struct legacy_pci_expansion_rom_header {
8475 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8476 u8 size512; /* Current Image Size in units of 512 bytes */
8477 u8 initentry_point[4];
8478 u8 cksum; /* Checksum computed on the entire Image */
8479 u8 reserved[16]; /* Reserved */
8480 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
8481 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8483 /* EFI PCI Expansion ROM Header */
8484 typedef struct efi_pci_expansion_rom_header {
8485 u8 signature[2]; // ROM signature. The value 0xaa55
8486 u8 initialization_size[2]; /* Units 512. Includes this header */
8487 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8488 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
8489 u8 efi_machine_type[2]; /* Machine type from EFI image header */
8490 u8 compression_type[2]; /* Compression type. */
8492 * Compression type definition
8495 * 0x2-0xFFFF: Reserved
8497 u8 reserved[8]; /* Reserved */
8498 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
8499 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8500 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8502 /* PCI Data Structure Format */
8503 typedef struct pcir_data_structure { /* PCI Data Structure */
8504 u8 signature[4]; /* Signature. The string "PCIR" */
8505 u8 vendor_id[2]; /* Vendor Identification */
8506 u8 device_id[2]; /* Device Identification */
8507 u8 vital_product[2]; /* Pointer to Vital Product Data */
8508 u8 length[2]; /* PCIR Data Structure Length */
8509 u8 revision; /* PCIR Data Structure Revision */
8510 u8 class_code[3]; /* Class Code */
8511 u8 image_length[2]; /* Image Length. Multiple of 512B */
8512 u8 code_revision[2]; /* Revision Level of Code/Data */
8513 u8 code_type; /* Code Type. */
8515 * PCI Expansion ROM Code Types
8516 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8517 * 0x01: Open Firmware standard for PCI. FCODE
8518 * 0x02: Hewlett-Packard PA RISC. HP reserved
8519 * 0x03: EFI Image. EFI
8520 * 0x04-0xFF: Reserved.
8522 u8 indicator; /* Indicator. Identifies the last image in the ROM */
8523 u8 reserved[2]; /* Reserved */
8524 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8526 /* BOOT constants */
8528 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8529 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8530 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8531 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8532 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8533 VENDOR_ID = 0x1425, /* Vendor ID */
8534 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8538 * modify_device_id - Modifies the device ID of the Boot BIOS image
8539 * @adatper: the device ID to write.
8540 * @boot_data: the boot image to modify.
8542 * Write the supplied device ID to the boot BIOS image.
8544 static void modify_device_id(int device_id, u8 *boot_data)
8546 legacy_pci_exp_rom_header_t *header;
8547 pcir_data_t *pcir_header;
8551 * Loop through all chained images and change the device ID's
8554 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8555 pcir_header = (pcir_data_t *) &boot_data[cur_header +
8556 le16_to_cpu(*(u16*)header->pcir_offset)];
8559 * Only modify the Device ID if code type is Legacy or HP.
8560 * 0x00: Okay to modify
8561 * 0x01: FCODE. Do not be modify
8562 * 0x03: Okay to modify
8563 * 0x04-0xFF: Do not modify
8565 if (pcir_header->code_type == 0x00) {
8570 * Modify Device ID to match current adatper
8572 *(u16*) pcir_header->device_id = device_id;
8575 * Set checksum temporarily to 0.
8576 * We will recalculate it later.
8578 header->cksum = 0x0;
8581 * Calculate and update checksum
8583 for (i = 0; i < (header->size512 * 512); i++)
8584 csum += (u8)boot_data[cur_header + i];
8587 * Invert summed value to create the checksum
8588 * Writing new checksum value directly to the boot data
8590 boot_data[cur_header + 7] = -csum;
8592 } else if (pcir_header->code_type == 0x03) {
8595 * Modify Device ID to match current adatper
8597 *(u16*) pcir_header->device_id = device_id;
8603 * Check indicator element to identify if this is the last
8606 if (pcir_header->indicator & 0x80)
8610 * Move header pointer up to the next image in the ROM.
8612 cur_header += header->size512 * 512;
8617 * t4_load_boot - download boot flash
8618 * @adapter: the adapter
8619 * @boot_data: the boot image to write
8620 * @boot_addr: offset in flash to write boot_data
8623 * Write the supplied boot image to the card's serial flash.
8624 * The boot image has the following sections: a 28-byte header and the
8627 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8628 unsigned int boot_addr, unsigned int size)
8630 pci_exp_rom_header_t *header;
8632 pcir_data_t *pcir_header;
8636 unsigned int boot_sector = (boot_addr * 1024 );
8637 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8640 * Make sure the boot image does not encroach on the firmware region
8642 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8643 CH_ERR(adap, "boot image encroaching on firmware region\n");
8648 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8649 * and Boot configuration data sections. These 3 boot sections span
8650 * sectors 0 to 7 in flash and live right before the FW image location.
8652 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8654 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8655 (boot_sector >> 16) + i - 1);
8658 * If size == 0 then we're simply erasing the FLASH sectors associated
8659 * with the on-adapter option ROM file
8661 if (ret || (size == 0))
8664 /* Get boot header */
8665 header = (pci_exp_rom_header_t *)boot_data;
8666 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8667 /* PCIR Data Structure */
8668 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8671 * Perform some primitive sanity testing to avoid accidentally
8672 * writing garbage over the boot sectors. We ought to check for
8673 * more but it's not worth it for now ...
8675 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8676 CH_ERR(adap, "boot image too small/large\n");
8680 #ifndef CHELSIO_T4_DIAGS
8682 * Check BOOT ROM header signature
8684 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8685 CH_ERR(adap, "Boot image missing signature\n");
8690 * Check PCI header signature
8692 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8693 CH_ERR(adap, "PCI header missing signature\n");
8698 * Check Vendor ID matches Chelsio ID
8700 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8701 CH_ERR(adap, "Vendor ID missing signature\n");
8707 * Retrieve adapter's device ID
8709 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8710 /* Want to deal with PF 0 so I strip off PF 4 indicator */
8711 device_id = device_id & 0xf0ff;
8714 * Check PCIE Device ID
8716 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8718 * Change the device ID in the Boot BIOS image to match
8719 * the Device ID of the current adapter.
8721 modify_device_id(device_id, boot_data);
8725 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8726 * we finish copying the rest of the boot image. This will ensure
8727 * that the BIOS boot header will only be written if the boot image
8728 * was written in full.
8731 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8732 addr += SF_PAGE_SIZE;
8733 boot_data += SF_PAGE_SIZE;
8734 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
8739 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8740 (const u8 *)header, 0);
8744 CH_ERR(adap, "boot image download failed, error %d\n", ret);
8749 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
8750 * @adapter: the adapter
8752 * Return the address within the flash where the OptionROM Configuration
8753 * is stored, or an error if the device FLASH is too small to contain
8754 * a OptionROM Configuration.
8756 static int t4_flash_bootcfg_addr(struct adapter *adapter)
8759 * If the device FLASH isn't large enough to hold a Firmware
8760 * Configuration File, return an error.
8762 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
8765 return FLASH_BOOTCFG_START;
8768 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
8770 int ret, i, n, cfg_addr;
8772 unsigned int flash_cfg_start_sec;
8773 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8775 cfg_addr = t4_flash_bootcfg_addr(adap);
8780 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8782 if (size > FLASH_BOOTCFG_MAX_SIZE) {
8783 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
8784 FLASH_BOOTCFG_MAX_SIZE);
8788 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
8790 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8791 flash_cfg_start_sec + i - 1);
8794 * If size == 0 then we're simply erasing the FLASH sectors associated
8795 * with the on-adapter OptionROM Configuration File.
8797 if (ret || size == 0)
8800 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8801 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8802 if ( (size - i) < SF_PAGE_SIZE)
8806 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
8810 addr += SF_PAGE_SIZE;
8811 cfg_data += SF_PAGE_SIZE;
8816 CH_ERR(adap, "boot config data %s failed %d\n",
8817 (size == 0 ? "clear" : "download"), ret);
8822 * t4_set_filter_mode - configure the optional components of filter tuples
8823 * @adap: the adapter
8824 * @mode_map: a bitmap selcting which optional filter components to enable
8826 * Sets the filter mode by selecting the optional components to enable
8827 * in filter tuples. Returns 0 on success and a negative error if the
8828 * requested mode needs more bits than are available for optional
8831 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
8833 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
8837 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
8838 if (mode_map & (1 << i))
8840 if (nbits > FILTER_OPT_LEN)
8842 if (t4_use_ldst(adap))
8843 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
8845 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
8846 1, A_TP_VLAN_PRI_MAP);
8847 read_filter_mode_and_ingress_config(adap);
8853 * t4_clr_port_stats - clear port statistics
8854 * @adap: the adapter
8855 * @idx: the port index
8857 * Clear HW statistics for the given port.
8859 void t4_clr_port_stats(struct adapter *adap, int idx)
8862 u32 bgmap = t4_get_mps_bg_map(adap, idx);
8866 port_base_addr = PORT_BASE(idx);
8868 port_base_addr = T5_PORT_BASE(idx);
8870 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
8871 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
8872 t4_write_reg(adap, port_base_addr + i, 0);
8873 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
8874 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
8875 t4_write_reg(adap, port_base_addr + i, 0);
8876 for (i = 0; i < 4; i++)
8877 if (bgmap & (1 << i)) {
8879 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
8881 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
8886 * t4_i2c_rd - read I2C data from adapter
8887 * @adap: the adapter
8888 * @port: Port number if per-port device; <0 if not
8889 * @devid: per-port device ID or absolute device ID
8890 * @offset: byte offset into device I2C space
8891 * @len: byte length of I2C space data
8892 * @buf: buffer in which to return I2C data
8894 * Reads the I2C data from the indicated device and location.
8896 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
8897 int port, unsigned int devid,
8898 unsigned int offset, unsigned int len,
8902 struct fw_ldst_cmd ldst;
8908 len > sizeof ldst.u.i2c.data)
8911 memset(&ldst, 0, sizeof ldst);
8912 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8913 ldst.op_to_addrspace =
8914 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8918 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
8919 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8920 ldst.u.i2c.did = devid;
8921 ldst.u.i2c.boffset = offset;
8922 ldst.u.i2c.blen = len;
8923 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8925 memcpy(buf, ldst.u.i2c.data, len);
8930 * t4_i2c_wr - write I2C data to adapter
8931 * @adap: the adapter
8932 * @port: Port number if per-port device; <0 if not
8933 * @devid: per-port device ID or absolute device ID
8934 * @offset: byte offset into device I2C space
8935 * @len: byte length of I2C space data
8936 * @buf: buffer containing new I2C data
8938 * Write the I2C data to the indicated device and location.
8940 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
8941 int port, unsigned int devid,
8942 unsigned int offset, unsigned int len,
8946 struct fw_ldst_cmd ldst;
8951 len > sizeof ldst.u.i2c.data)
8954 memset(&ldst, 0, sizeof ldst);
8955 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8956 ldst.op_to_addrspace =
8957 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8961 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
8962 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8963 ldst.u.i2c.did = devid;
8964 ldst.u.i2c.boffset = offset;
8965 ldst.u.i2c.blen = len;
8966 memcpy(ldst.u.i2c.data, buf, len);
8967 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8971 * t4_sge_ctxt_rd - read an SGE context through FW
8972 * @adap: the adapter
8973 * @mbox: mailbox to use for the FW command
8974 * @cid: the context id
8975 * @ctype: the context type
8976 * @data: where to store the context data
8978 * Issues a FW command through the given mailbox to read an SGE context.
8980 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
8981 enum ctxt_type ctype, u32 *data)
8984 struct fw_ldst_cmd c;
8986 if (ctype == CTXT_EGRESS)
8987 ret = FW_LDST_ADDRSPC_SGE_EGRC;
8988 else if (ctype == CTXT_INGRESS)
8989 ret = FW_LDST_ADDRSPC_SGE_INGC;
8990 else if (ctype == CTXT_FLM)
8991 ret = FW_LDST_ADDRSPC_SGE_FLMC;
8993 ret = FW_LDST_ADDRSPC_SGE_CONMC;
8995 memset(&c, 0, sizeof(c));
8996 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8997 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8998 V_FW_LDST_CMD_ADDRSPACE(ret));
8999 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9000 c.u.idctxt.physid = cpu_to_be32(cid);
9002 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9004 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9005 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9006 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9007 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9008 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9009 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9015 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9016 * @adap: the adapter
9017 * @cid: the context id
9018 * @ctype: the context type
9019 * @data: where to store the context data
9021 * Reads an SGE context directly, bypassing FW. This is only for
9022 * debugging when FW is unavailable.
9024 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9029 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9030 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9032 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9033 *data++ = t4_read_reg(adap, i);
9037 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9040 struct fw_sched_cmd cmd;
9042 memset(&cmd, 0, sizeof(cmd));
9043 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9046 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9048 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9049 cmd.u.config.type = type;
9050 cmd.u.config.minmaxen = minmaxen;
9052 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9056 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9057 int rateunit, int ratemode, int channel, int cl,
9058 int minrate, int maxrate, int weight, int pktsize,
9061 struct fw_sched_cmd cmd;
9063 memset(&cmd, 0, sizeof(cmd));
9064 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9067 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9069 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9070 cmd.u.params.type = type;
9071 cmd.u.params.level = level;
9072 cmd.u.params.mode = mode;
9073 cmd.u.params.ch = channel;
9074 cmd.u.params.cl = cl;
9075 cmd.u.params.unit = rateunit;
9076 cmd.u.params.rate = ratemode;
9077 cmd.u.params.min = cpu_to_be32(minrate);
9078 cmd.u.params.max = cpu_to_be32(maxrate);
9079 cmd.u.params.weight = cpu_to_be16(weight);
9080 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9082 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9087 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9088 * @adapter: the adapter
9089 * @mbox: mailbox to use for the FW command
9090 * @pf: the PF owning the queue
9091 * @vf: the VF owning the queue
9092 * @timeout: watchdog timeout in ms
9093 * @action: watchdog timer / action
9095 * There are separate watchdog timers for each possible watchdog
9096 * action. Configure one of the watchdog timers by setting a non-zero
9097 * timeout. Disable a watchdog timer by using a timeout of zero.
9099 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9100 unsigned int pf, unsigned int vf,
9101 unsigned int timeout, unsigned int action)
9103 struct fw_watchdog_cmd wdog;
9107 * The watchdog command expects a timeout in units of 10ms so we need
9108 * to convert it here (via rounding) and force a minimum of one 10ms
9109 * "tick" if the timeout is non-zero but the convertion results in 0
9112 ticks = (timeout + 5)/10;
9113 if (timeout && !ticks)
9116 memset(&wdog, 0, sizeof wdog);
9117 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9120 V_FW_PARAMS_CMD_PFN(pf) |
9121 V_FW_PARAMS_CMD_VFN(vf));
9122 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9123 wdog.timeout = cpu_to_be32(ticks);
9124 wdog.action = cpu_to_be32(action);
9126 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9129 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9131 struct fw_devlog_cmd devlog_cmd;
9134 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9135 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9136 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9137 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9138 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9139 sizeof(devlog_cmd), &devlog_cmd);
9143 *level = devlog_cmd.level;
9147 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9149 struct fw_devlog_cmd devlog_cmd;
9151 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9152 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9155 devlog_cmd.level = level;
9156 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9157 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9158 sizeof(devlog_cmd), &devlog_cmd);