2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
37 #include "t4_regs_values.h"
38 #include "firmware/t4fw_interface.h"
/* Sleep for roughly @x milliseconds via pause(9); "t4hw" is the wmesg. */
41 #define msleep(x) do { \
45 pause("t4hw", (x) * hz / 1000); \
49 * t4_wait_op_done_val - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 * @valp: where to store the value of the register at completion time
58 * Wait until an operation is completed by checking a bit in a register
59 * up to @attempts times. If @valp is not NULL the value of the register
60 * at the time it indicated completion is stored there. Returns 0 if the
61 * operation completes and -EAGAIN otherwise.
63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64 int polarity, int attempts, int delay, u32 *valp)
67 u32 val = t4_read_reg(adapter, reg);
69 if (!!(val & mask) == polarity) {
81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
82 int polarity, int attempts, int delay)
84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
89 * t4_set_reg_field - set a register field to a value
90 * @adapter: the adapter to program
91 * @addr: the register address
92 * @mask: specifies the portion of the register to modify
93 * @val: the new value for the register field
95 * Sets a register field specified by the supplied mask to the
98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
101 u32 v = t4_read_reg(adapter, addr) & ~mask;
103 t4_write_reg(adapter, addr, v | val);
104 (void) t4_read_reg(adapter, addr); /* flush */
108 * t4_read_indirect - read indirectly addressed registers
110 * @addr_reg: register holding the indirect address
111 * @data_reg: register holding the value of the indirect register
112 * @vals: where the read register values are stored
113 * @nregs: how many indirect registers to read
114 * @start_idx: index of first indirect register to read
116 * Reads registers that are accessed indirectly through an address/data
119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 unsigned int data_reg, u32 *vals,
121 unsigned int nregs, unsigned int start_idx)
124 t4_write_reg(adap, addr_reg, start_idx);
125 *vals++ = t4_read_reg(adap, data_reg);
131 * t4_write_indirect - write indirectly addressed registers
133 * @addr_reg: register holding the indirect addresses
134 * @data_reg: register holding the value for the indirect registers
135 * @vals: values to write
136 * @nregs: how many indirect registers to write
137 * @start_idx: address of first indirect register to write
139 * Writes a sequential block of registers that are accessed indirectly
140 * through an address/data register pair.
142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
143 unsigned int data_reg, const u32 *vals,
144 unsigned int nregs, unsigned int start_idx)
147 t4_write_reg(adap, addr_reg, start_idx++);
148 t4_write_reg(adap, data_reg, *vals++);
153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
154 * mechanism. This guarantees that we get the real value even if we're
155 * operating within a Virtual Machine and the Hypervisor is trapping our
156 * Configuration Space accesses.
158 * N.B. This routine should only be used as a last resort: the firmware uses
159 * the backdoor registers on a regular basis and we can end up
160 * conflicting with its uses!
162 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
167 if (chip_id(adap) <= CHELSIO_T5)
175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
180 * Configuration Space read. (None of the other fields matter when
181 * F_ENABLE is 0 so a simple register write is easier than a
182 * read-modify-write via t4_set_reg_field().)
184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
190 * t4_report_fw_error - report firmware error
193 * The adapter firmware can indicate error conditions to the host.
194 * If the firmware has indicated an error, print out the reason for
195 * the firmware error.
197 static void t4_report_fw_error(struct adapter *adap)
199 static const char *const reason[] = {
200 "Crash", /* PCIE_FW_EVAL_CRASH */
201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
207 "Reserved", /* reserved */
211 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
212 if (pcie_fw & F_PCIE_FW_ERR)
213 CH_ERR(adap, "Firmware reports adapter error: %s\n",
214 reason[G_PCIE_FW_EVAL(pcie_fw)]);
218 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
220 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
223 for ( ; nflit; nflit--, mbox_addr += 8)
224 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
228 * Handle a FW assertion reported in a mailbox.
230 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
233 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
234 asrt->u.assert.filename_0_7,
235 be32_to_cpu(asrt->u.assert.line),
236 be32_to_cpu(asrt->u.assert.x),
237 be32_to_cpu(asrt->u.assert.y));
240 #define X_CIM_PF_NOACCESS 0xeeeeeeee
242 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
244 * @mbox: index of the mailbox to use
245 * @cmd: the command to write
246 * @size: command length in bytes
247 * @rpl: where to optionally store the reply
248 * @sleep_ok: if true we may sleep while awaiting command completion
249 * @timeout: time to wait for command to finish before timing out
250 * (negative implies @sleep_ok=false)
252 * Sends the given command to FW through the selected mailbox and waits
253 * for the FW to execute the command. If @rpl is not %NULL it is used to
254 * store the FW's reply to the command. The command and its optional
255 * reply are of the same length. Some FW commands like RESET and
256 * INITIALIZE can take a considerable amount of time to execute.
257 * @sleep_ok determines whether we may sleep while awaiting the response.
258 * If sleeping is allowed we use progressive backoff otherwise we spin.
259 * Note that passing in a negative @timeout is an alternate mechanism
260 * for specifying @sleep_ok=false. This is useful when a higher level
261 * interface allows for specification of @timeout but not @sleep_ok ...
263 * The return value is 0 on success or a negative errno on failure. A
264 * failure can happen either because we are not able to execute the
265 * command or FW executes it but signals an error. In the latter case
266 * the return value is the error code indicated by FW (negated).
268 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
269 int size, void *rpl, bool sleep_ok, int timeout)
/*
 * NOTE(review): this extract is missing interior lines (braces, returns,
 * backoff delays); the comments below annotate only what is visible here.
 */
272 * We delay in small increments at first in an effort to maintain
273 * responsiveness for simple, fast executing commands but then back
274 * off to larger delays to a maximum retry delay.
276 static const int delay[] = {
277 1, 1, 3, 5, 10, 10, 20, 50, 100
/* Mailbox data/control registers are per-PF, selected by @mbox. */
281 int i, ms, delay_idx, ret;
282 const __be64 *p = cmd;
283 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
284 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
286 __be64 cmd_rpl[MBOX_LEN/8];
/* Commands must be a multiple of 16 bytes and fit within the mailbox. */
289 if ((size & 15) || size > MBOX_LEN)
293 * If we have a negative timeout, that implies that we can't sleep.
301 * Attempt to gain access to the mailbox.
303 for (i = 0; i < 4; i++) {
304 ctl = t4_read_reg(adap, ctl_reg);
306 if (v != X_MBOWNER_NONE)
311 * If we were unable to gain access, dequeue ourselves from the
312 * mailbox atomic access list and report the error to our caller.
314 if (v != X_MBOWNER_PL) {
315 t4_report_fw_error(adap);
316 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
321 * If we gain ownership of the mailbox and there's a "valid" message
322 * in it, this is likely an asynchronous error message from the
323 * firmware. So we'll report that and then proceed on with attempting
324 * to issue our own command ... which may well fail if the error
325 * presaged the firmware crashing ...
327 if (ctl & F_MBMSGVALID) {
328 CH_ERR(adap, "found VALID command in mbox %u: "
329 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
330 (unsigned long long)t4_read_reg64(adap, data_reg),
331 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
332 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
333 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
334 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
335 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
341 * Copy in the new mailbox command and send it on its way ...
343 for (i = 0; i < size; i += 8, p++)
344 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
346 CH_DUMP_MBOX(adap, mbox, data_reg);
/* Hand ownership to the firmware; the read-back flushes the write. */
348 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
349 t4_read_reg(adap, ctl_reg); /* flush write */
355 * Loop waiting for the reply; bail out if we time out or the firmware
359 !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
363 ms = delay[delay_idx]; /* last element may repeat */
364 if (delay_idx < ARRAY_SIZE(delay) - 1)
371 v = t4_read_reg(adap, ctl_reg);
372 if (v == X_CIM_PF_NOACCESS)
374 if (G_MBOWNER(v) == X_MBOWNER_PL) {
375 if (!(v & F_MBMSGVALID)) {
376 t4_write_reg(adap, ctl_reg,
377 V_MBOWNER(X_MBOWNER_NONE));
382 * Retrieve the command reply and release the mailbox.
384 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
385 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
387 CH_DUMP_MBOX(adap, mbox, data_reg);
/* A FW_DEBUG_CMD opcode in the reply is an assertion report, not our reply. */
389 res = be64_to_cpu(cmd_rpl[0]);
390 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
391 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
392 res = V_FW_CMD_RETVAL(EIO);
394 memcpy(rpl, cmd_rpl, size);
395 return -G_FW_CMD_RETVAL((int)res);
400 * We timed out waiting for a reply to our mailbox command. Report
401 * the error and also check to see if the firmware reported any
404 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
405 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
406 *(const u8 *)cmd, mbox);
408 t4_report_fw_error(adap);
413 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
414 void *rpl, bool sleep_ok)
416 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
417 sleep_ok, FW_CMD_MAX_TIMEOUT);
/* Log the ECC error address and BIST status words for EDC @idx (T5+). */
421 static int t4_edc_err_read(struct adapter *adap, int idx)
423 u32 edc_ecc_err_addr_reg;
424 u32 edc_bist_status_rdata_reg;
/* T4 parts are rejected by this routine. */
427 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
/* Only EDC controllers 0 and 1 exist. */
430 if (idx != 0 && idx != 1) {
431 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
435 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
436 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
439 "edc%d err addr 0x%x: 0x%x.\n",
440 idx, edc_ecc_err_addr_reg,
441 t4_read_reg(adap, edc_ecc_err_addr_reg));
/* Dump nine consecutive 64-bit BIST status data words. */
443 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
444 edc_bist_status_rdata_reg,
445 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
446 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
447 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
448 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
449 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
450 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
451 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
452 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
453 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
459 * t4_mc_read - read from MC through backdoor accesses
461 * @idx: which MC to access
462 * @addr: address of first byte requested
463 * @data: 64 bytes of data containing the requested address
464 * @ecc: where to store the corresponding 64-bit ECC word
466 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
467 * that covers the requested address @addr. If @parity is not %NULL it
468 * is assigned the 64-bit ECC word for the read data.
470 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
473 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
474 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
477 mc_bist_cmd_reg = A_MC_BIST_CMD;
478 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
479 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
480 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
481 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
483 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
484 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
485 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
486 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
488 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
492 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
494 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
495 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
496 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
497 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
498 F_START_BIST | V_BIST_CMD_GAP(1));
499 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
503 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
505 for (i = 15; i >= 0; i--)
506 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
508 *ecc = t4_read_reg64(adap, MC_DATA(16));
514 * t4_edc_read - read from EDC through backdoor accesses
516 * @idx: which EDC to access
517 * @addr: address of first byte requested
518 * @data: 64 bytes of data containing the requested address
519 * @ecc: where to store the corresponding 64-bit ECC word
521 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
522 * that covers the requested address @addr. If @parity is not %NULL it
523 * is assigned the 64-bit ECC word for the read data.
525 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
528 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
529 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
532 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
533 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
534 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
535 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
537 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
541 * These macro are missing in t4_regs.h file.
542 * Added temporarily for testing.
544 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
545 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
546 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
547 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
548 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
549 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
551 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
557 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
559 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
560 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
561 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
562 t4_write_reg(adap, edc_bist_cmd_reg,
563 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
564 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
568 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
570 for (i = 15; i >= 0; i--)
571 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
573 *ecc = t4_read_reg64(adap, EDC_DATA(16));
579 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
581 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
582 * @addr: address within indicated memory type
583 * @len: amount of memory to read
584 * @buf: host memory buffer
586 * Reads an [almost] arbitrary memory region in the firmware: the
587 * firmware memory address, length and host buffer must be aligned on
588 * 32-bit boundaries. The memory is returned as a raw byte sequence from
589 * the firmware's memory. If this memory contains data structures which
590 * contain multi-byte integers, it's the caller's responsibility to
591 * perform appropriate byte order conversions.
593 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
596 u32 pos, start, end, offset;
600 * Argument sanity checks ...
602 if ((addr & 0x3) || (len & 0x3))
606 * The underlaying EDC/MC read routines read 64 bytes at a time so we
607 * need to round down the start and round up the end. We'll start
608 * copying out of the first line at (addr - start) a word at a time.
610 start = rounddown2(addr, 64);
611 end = roundup2(addr + len, 64);
612 offset = (addr - start)/sizeof(__be32);
614 for (pos = start; pos < end; pos += 64, offset = 0) {
618 * Read the chip's memory block and bail if there's an error.
620 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
621 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
623 ret = t4_edc_read(adap, mtype, pos, data, NULL);
628 * Copy the data into the caller's memory buffer.
630 while (offset < 16 && len > 0) {
631 *buf++ = data[offset++];
632 len -= sizeof(__be32);
640 * Return the specified PCI-E Configuration Space register from our Physical
641 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
642 * since we prefer to let the firmware own all of these registers, but if that
643 * fails we go for it directly ourselves.
645 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
649 * If fw_attach != 0, construct and send the Firmware LDST Command to
650 * retrieve the specified PCI-E Configuration Space register.
652 if (drv_fw_attach != 0) {
653 struct fw_ldst_cmd ldst_cmd;
656 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
657 ldst_cmd.op_to_addrspace =
658 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
661 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
662 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
663 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
664 ldst_cmd.u.pcie.ctrl_to_fn =
665 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
666 ldst_cmd.u.pcie.r = reg;
669 * If the LDST Command succeeds, return the result, otherwise
670 * fall through to reading it directly ourselves ...
672 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
675 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
677 CH_WARN(adap, "Firmware failed to return "
678 "Configuration Space register %d, err = %d\n",
683 * Read the desired Configuration Space register via the PCI-E
684 * Backdoor mechanism.
686 return t4_hw_pci_read_cfg4(adap, reg);
690 * t4_get_regs_len - return the size of the chips register set
691 * @adapter: the adapter
693 * Returns the size of the chip's BAR0 register space.
695 unsigned int t4_get_regs_len(struct adapter *adapter)
697 unsigned int chip_version = chip_id(adapter);
699 switch (chip_version) {
701 return T4_REGMAP_SIZE;
705 return T5_REGMAP_SIZE;
709 "Unsupported chip version %d\n", chip_version);
714 * t4_get_regs - read chip registers into provided buffer
716 * @buf: register buffer
717 * @buf_size: size (in bytes) of register buffer
719 * If the provided register buffer isn't large enough for the chip's
720 * full register range, the register dump will be truncated to the
721 * register buffer's size.
723 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
/*
 * Per-chip register-range tables: pairs of (first, last) register
 * addresses.  The table bodies are elided in this extract.
 */
725 static const unsigned int t4_reg_ranges[] = {
1183 static const unsigned int t5_reg_ranges[] = {
1958 static const unsigned int t6_reg_ranges[] = {
2535 u32 *buf_end = (u32 *)(buf + buf_size);
2536 const unsigned int *reg_ranges;
2537 int reg_ranges_size, range;
2538 unsigned int chip_version = chip_id(adap);
2541 * Select the right set of register ranges to dump depending on the
2542 * adapter chip type.
2544 switch (chip_version) {
2546 reg_ranges = t4_reg_ranges;
2547 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2551 reg_ranges = t5_reg_ranges;
2552 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2556 reg_ranges = t6_reg_ranges;
2557 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2562 "Unsupported chip version %d\n", chip_version);
2567 * Clear the register buffer and insert the appropriate register
2568 * values selected by the above register ranges.
2570 memset(buf, 0, buf_size);
2571 for (range = 0; range < reg_ranges_size; range += 2) {
2572 unsigned int reg = reg_ranges[range];
2573 unsigned int last_reg = reg_ranges[range + 1];
/* Each register lands at its own offset within the dump buffer. */
2574 u32 *bufp = (u32 *)(buf + reg);
2577 * Iterate across the register range filling in the register
2578 * buffer but don't write past the end of the register buffer.
2580 while (reg <= last_reg && bufp < buf_end) {
2581 *bufp++ = t4_read_reg(adap, reg);
2588 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2600 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2602 #define EEPROM_DELAY 10 /* 10us per poll spin */
2603 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2605 #define EEPROM_STAT_ADDR 0x7bfc
2606 #define VPD_BASE 0x400
2607 #define VPD_BASE_OLD 0
2608 #define VPD_LEN 1024
2609 #define VPD_INFO_FLD_HDR_SIZE 3
2610 #define CHELSIO_VPD_UNIQUE_ID 0x82
2613 * Small utility function to wait till any outstanding VPD Access is complete.
2614 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2615 * VPD Access in flight. This allows us to handle the problem of having a
2616 * previous VPD Access time out and prevent an attempt to inject a new VPD
2617 * Request before any in-flight VPD request has completed.
2619 static int t4_seeprom_wait(struct adapter *adapter)
2621 unsigned int base = adapter->params.pci.vpd_cap_addr;
2625 * If no VPD Access is in flight, we can just return success right
2628 if (!adapter->vpd_busy)
2632 * Poll the VPD Capability Address/Flag register waiting for it
2633 * to indicate that the operation is complete.
2635 max_poll = EEPROM_MAX_POLL;
2639 udelay(EEPROM_DELAY);
2640 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2643 * If the operation is complete, mark the VPD as no longer
2644 * busy and return success.
2646 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2647 adapter->vpd_busy = 0;
2650 } while (--max_poll);
2653 * Failure! Note that we leave the VPD Busy status set in order to
2654 * avoid pushing a new VPD Access request into the VPD Capability till
2655 * the current operation eventually succeeds. It's a bug to issue a
2656 * new request when an existing request is in flight and will result
2657 * in corrupt hardware state.
2663 * t4_seeprom_read - read a serial EEPROM location
2664 * @adapter: adapter to read
2665 * @addr: EEPROM virtual address
2666 * @data: where to store the read data
2668 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2669 * VPD capability. Note that this function must be called with a virtual
2672 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2674 unsigned int base = adapter->params.pci.vpd_cap_addr;
2678 * VPD Accesses must alway be 4-byte aligned!
2680 if (addr >= EEPROMVSIZE || (addr & 3))
2684 * Wait for any previous operation which may still be in flight to
2687 ret = t4_seeprom_wait(adapter);
2689 CH_ERR(adapter, "VPD still busy from previous operation\n");
2694 * Issue our new VPD Read request, mark the VPD as being busy and wait
2695 * for our request to complete. If it doesn't complete, note the
2696 * error and return it to our caller. Note that we do not reset the
2699 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2700 adapter->vpd_busy = 1;
2701 adapter->vpd_flag = PCI_VPD_ADDR_F;
2702 ret = t4_seeprom_wait(adapter);
2704 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2709 * Grab the returned data, swizzle it into our endianness and
2712 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2713 *data = le32_to_cpu(*data);
2718 * t4_seeprom_write - write a serial EEPROM location
2719 * @adapter: adapter to write
2720 * @addr: virtual EEPROM address
2721 * @data: value to write
2723 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2724 * VPD capability. Note that this function must be called with a virtual
2727 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2729 unsigned int base = adapter->params.pci.vpd_cap_addr;
2735 * VPD Accesses must alway be 4-byte aligned!
2737 if (addr >= EEPROMVSIZE || (addr & 3))
2741 * Wait for any previous operation which may still be in flight to
2744 ret = t4_seeprom_wait(adapter);
2746 CH_ERR(adapter, "VPD still busy from previous operation\n");
2751 * Issue our new VPD Read request, mark the VPD as being busy and wait
2752 * for our request to complete. If it doesn't complete, note the
2753 * error and return it to our caller. Note that we do not reset the
2756 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2758 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2759 (u16)addr | PCI_VPD_ADDR_F);
2760 adapter->vpd_busy = 1;
2761 adapter->vpd_flag = 0;
2762 ret = t4_seeprom_wait(adapter);
2764 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2769 * Reset PCI_VPD_DATA register after a transaction and wait for our
2770 * request to complete. If it doesn't complete, return error.
2772 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2773 max_poll = EEPROM_MAX_POLL;
2775 udelay(EEPROM_DELAY);
2776 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2777 } while ((stats_reg & 0x1) && --max_poll);
2781 /* Return success! */
2786 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2787 * @phys_addr: the physical EEPROM address
2788 * @fn: the PCI function number
2789 * @sz: size of function-specific area
2791 * Translate a physical EEPROM address to virtual. The first 1K is
2792 * accessed through virtual addresses starting at 31K, the rest is
2793 * accessed through virtual addresses starting at 0.
2795 * The mapping is as follows:
2796 * [0..1K) -> [31K..32K)
2797 * [1K..1K+A) -> [ES-A..ES)
2798 * [1K+A..ES) -> [0..ES-A-1K)
2800 * where A = @fn * @sz, and ES = EEPROM size.
2802 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2805 if (phys_addr < 1024)
2806 return phys_addr + (31 << 10);
2807 if (phys_addr < 1024 + fn)
2808 return EEPROMSIZE - fn + phys_addr - 1024;
2809 if (phys_addr < EEPROMSIZE)
2810 return phys_addr - 1024 - fn;
2815 * t4_seeprom_wp - enable/disable EEPROM write protection
2816 * @adapter: the adapter
2817 * @enable: whether to enable or disable write protection
2819 * Enables or disables write protection on the serial EEPROM.
2821 int t4_seeprom_wp(struct adapter *adapter, int enable)
2823 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2827 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2828 * @v: Pointer to buffered vpd data structure
2829 * @kw: The keyword to search for
2831 * Returns the value of the information field keyword or
2832 * -ENOENT otherwise.
2834 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2837 unsigned int offset , len;
2838 const u8 *buf = (const u8 *)v;
2839 const u8 *vpdr_len = &v->vpdr_len[0];
2840 offset = sizeof(struct t4_vpd_hdr);
2841 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2843 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2847 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2848 if(memcmp(buf + i , kw , 2) == 0){
2849 i += VPD_INFO_FLD_HDR_SIZE;
2853 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2861 * get_vpd_params - read VPD parameters from VPD EEPROM
2862 * @adapter: adapter to read
2863 * @p: where to store the parameters
2864 * @vpd: caller provided temporary space to read the VPD into
2866 * Reads card parameters stored in VPD EEPROM.
2868 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2874 const struct t4_vpd_hdr *v;
/* Probe the first word at VPD_BASE to detect old-layout cards. */
2877 * Card information normally starts at VPD_BASE but early cards had
2880 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2885 * The VPD shall have a unique identifier specified by the PCI SIG.
2886 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2887 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2888 * is expected to automatically put this entry at the
2889 * beginning of the VPD.
2891 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Read the whole VPD region a word at a time. */
2893 for (i = 0; i < VPD_LEN; i += 4) {
2894 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2898 v = (const struct t4_vpd_hdr *)vpd;
/* Helper: locate a keyword's data offset; logs and errors out if absent. */
2900 #define FIND_VPD_KW(var,name) do { \
2901 var = get_vpd_keyword_val(v , name); \
2903 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* Verify the VPD checksum via the "RV" field. */
2908 FIND_VPD_KW(i, "RV");
2909 for (csum = 0; i >= 0; i--)
2914 "corrupted VPD EEPROM, actual csum %u\n", csum);
2918 FIND_VPD_KW(ec, "EC");
2919 FIND_VPD_KW(sn, "SN");
2920 FIND_VPD_KW(pn, "PN");
2921 FIND_VPD_KW(na, "NA");
/* Copy each field into @p; lengths come from each field's 3-byte header. */
2924 memcpy(p->id, v->id_data, ID_LEN);
2926 memcpy(p->ec, vpd + ec, EC_LEN);
2928 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
2929 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2931 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
2932 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2933 strstrip((char *)p->pn);
2934 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
2935 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2936 strstrip((char *)p->na);
2941 /* serial flash and firmware constants and flash config file constants */
2943 SF_ATTEMPTS = 10, /* max retries for SF operations */
2945 /* flash command opcodes */
2946 SF_PROG_PAGE = 2, /* program page */
2947 SF_WR_DISABLE = 4, /* disable writes */
2948 SF_RD_STATUS = 5, /* read status register */
2949 SF_WR_ENABLE = 6, /* enable writes */
2950 SF_RD_DATA_FAST = 0xb, /* read flash */
2951 SF_RD_ID = 0x9f, /* read ID */
2952 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2956 * sf1_read - read data from the serial flash
2957 * @adapter: the adapter
2958 * @byte_cnt: number of bytes to read
2959 * @cont: whether another operation will be chained
2960 * @lock: whether to lock SF for PL access only
2961 * @valp: where to store the read data
2963 * Reads up to 4 bytes of data from the serial flash. The location of
2964 * the read needs to be specified prior to calling this by issuing the
2965 * appropriate commands to the serial flash.
/*
 * NOTE(review): partial listing — the declaration of 'ret', the error
 * returns for the two guard conditions, and the final "return ret;" are
 * among the lines missing from this extract. Code left byte-identical.
 */
2967 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2968 int lock, u32 *valp)
/* only 1..4 byte transfers are supported by the SF_OP engine */
2972 if (!byte_cnt || byte_cnt > 4)
/* refuse to start while a previous flash operation is still in flight */
2974 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* start the read; BYTECNT is encoded as (count - 1) */
2976 t4_write_reg(adapter, A_SF_OP,
2977 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
/* wait for BUSY to clear, then fetch the data word */
2978 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2980 *valp = t4_read_reg(adapter, A_SF_DATA);
2985 * sf1_write - write data to the serial flash
2986 * @adapter: the adapter
2987 * @byte_cnt: number of bytes to write
2988 * @cont: whether another operation will be chained
2989 * @lock: whether to lock SF for PL access only
2990 * @val: value to write
2992 * Writes up to 4 bytes of data to the serial flash. The location of
2993 * the write needs to be specified prior to calling this by issuing the
2994 * appropriate commands to the serial flash.
/*
 * NOTE(review): partial listing — the second signature line (the 'lock'
 * and 'val' parameters) and the guard-condition error returns are among
 * the lines missing from this extract.
 */
2996 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* only 1..4 byte transfers are supported by the SF_OP engine */
2999 if (!byte_cnt || byte_cnt > 4)
2001 is not present; guard below rejects a busy engine */
3001 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* load the data word, then start the write with V_OP(1) */
3003 t4_write_reg(adapter, A_SF_DATA, val);
3004 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3005 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3006 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3010 * flash_wait_op - wait for a flash operation to complete
3011 * @adapter: the adapter
3012 * @attempts: max number of polls of the status register
3013 * @delay: delay between polls in ms
3015 * Wait for a flash operation to complete by polling the status register.
/*
 * NOTE(review): partial listing — the polling loop construct, the busy-bit
 * test on 'status', the msleep(delay) call, and the return statements are
 * among the lines missing from this extract.
 */
3017 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* issue RDSR (read status register) and read one status byte back */
3023 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3024 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
/* give up once the poll budget is exhausted */
3028 if (--attempts == 0)
3036 * t4_read_flash - read words from serial flash
3037 * @adapter: the adapter
3038 * @addr: the start address for the read
3039 * @nwords: how many 32-bit words to read
3040 * @data: where to store the read data
3041 * @byte_oriented: whether to store data as bytes or as words
3043 * Read the specified number of 32-bit words from the serial flash.
3044 * If @byte_oriented is set the read data is stored as a byte array
3045 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3046 * natural endianness.
/* NOTE(review): partial listing — error returns and loop-body braces are
 * among the missing lines. Code left byte-identical. */
3048 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3049 unsigned int nwords, u32 *data, int byte_oriented)
/* reject out-of-range or non-word-aligned start addresses */
3053 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* byte-swap puts the 24-bit address MSB-first with the opcode in the
 * low byte, matching the SPI command framing */
3056 addr = swab32(addr) | SF_RD_DATA_FAST;
3058 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3059 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* stream out the words; last read drops CONT and locks SF */
3062 for ( ; nwords; nwords--, data++) {
3063 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3065 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* byte-oriented callers get big-endian (flash) byte order */
3069 *data = (__force __u32)(cpu_to_be32(*data));
3075 * t4_write_flash - write up to a page of data to the serial flash
3076 * @adapter: the adapter
3077 * @addr: the start address to write
3078 * @n: length of data to write in bytes
3079 * @data: the data to write
3080 * @byte_oriented: whether to store data as bytes or as words
3082 * Writes up to a page of data (256 bytes) to the serial flash starting
3083 * at the given address. All the data must be written to the same page.
3084 * If @byte_oriented is set the write data is stored as byte stream
3085 * (i.e. matches what on disk), otherwise in big-endian.
/* NOTE(review): partial listing — 'ret' declaration, several error-path
 * gotos/returns and closing braces are among the missing lines. */
3087 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3088 unsigned int n, const u8 *data, int byte_oriented)
3091 u32 buf[SF_PAGE_SIZE / 4];
3092 unsigned int i, c, left, val, offset = addr & 0xff;
/* the whole write must land inside a single 256-byte flash page */
3094 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3097 val = swab32(addr) | SF_PROG_PAGE;
/* WREN first, then the page-program opcode + address */
3099 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3100 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* push the payload out up to 4 bytes at a time */
3103 for (left = n; left; left -= c) {
3105 for (val = 0, i = 0; i < c; ++i)
3106 val = (val << 8) + *data++;
3109 val = cpu_to_be32(val);
3111 ret = sf1_write(adapter, c, c != left, 1, val);
3115 ret = flash_wait_op(adapter, 8, 1);
3119 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3121 /* Read the page to verify the write succeeded */
3122 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* compare what we just wrote ('data' has advanced past it by n) */
3127 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3129 "failed to correctly write the flash page at %#x\n",
3136 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3141 * t4_get_fw_version - read the firmware version
3142 * @adapter: the adapter
3143 * @vers: where to place the version
3145 * Reads the FW version from flash.
/* NOTE(review): partial listing — the trailing argument line and closing
 * brace of this one-call wrapper are missing from this extract. */
3147 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3149 return t4_read_flash(adapter, FLASH_FW_START +
3150 offsetof(struct fw_hdr, fw_ver), 1,
3155 * t4_get_tp_version - read the TP microcode version
3156 * @adapter: the adapter
3157 * @vers: where to place the version
3159 * Reads the TP microcode version from flash.
/* NOTE(review): partial listing — the trailing argument line and closing
 * brace of this one-call wrapper are missing from this extract. */
3161 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3163 return t4_read_flash(adapter, FLASH_FW_START +
3164 offsetof(struct fw_hdr, tp_microcode_ver),
3169 * t4_get_exprom_version - return the Expansion ROM version (if any)
3170 * @adapter: the adapter
3171 * @vers: where to place the version
3173 * Reads the Expansion ROM header from FLASH and returns the version
3174 * number (if present) through the @vers return value pointer. We return
3175 * this in the Firmware Version Format since it's convenient. Return
3176 * 0 on success, -ENOENT if no Expansion ROM is present.
3178 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
/* local header layout; only the first 20 bytes of the ROM matter here */
3180 struct exprom_header {
3181 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3182 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* word-aligned buffer big enough to hold one exprom_header */
3184 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3188 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3189 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
/* 0x55 0xaa is the standard PCI expansion-ROM signature */
3194 hdr = (struct exprom_header *)exprom_header_buf;
3195 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* repack the 4 version bytes into FW version format */
3198 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3199 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3200 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3201 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3206 * t4_flash_erase_sectors - erase a range of flash sectors
3207 * @adapter: the adapter
3208 * @start: the first sector to erase
3209 * @end: the last sector to erase
3211 * Erases the sectors in the given inclusive range.
/* NOTE(review): partial listing — 'ret' declaration, range error return,
 * loop increment/break and final return are missing from this extract. */
3213 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3217 if (end >= adapter->params.sf_nsec)
3220 while (start <= end) {
/* WREN, then sector-erase with the sector number in the address field;
 * erase can be slow, hence the larger wait budget (14 x 500ms) */
3221 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3222 (ret = sf1_write(adapter, 4, 0, 1,
3223 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3224 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3226 "erase of flash sector %d failed, error %d\n",
3232 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3237 * t4_flash_cfg_addr - return the address of the flash configuration file
3238 * @adapter: the adapter
3240 * Return the address within the flash where the Firmware Configuration
3241 * File is stored, or an error if the device FLASH is too small to contain
3242 * a Firmware Configuration File.
/* NOTE(review): the error return for the too-small case is among the
 * lines missing from this partial extract. */
3244 int t4_flash_cfg_addr(struct adapter *adapter)
3247 * If the device FLASH isn't large enough to hold a Firmware
3248 * Configuration File, return an error.
3250 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3253 return FLASH_CFG_START;
3257 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3258 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3259 * and emit an error message for mismatched firmware to save our caller the
/* NOTE(review): partial listing — the "return 1;" after the match test and
 * the final "return 0;" are among the missing lines. */
3262 static int t4_fw_matches_chip(struct adapter *adap,
3263 const struct fw_hdr *hdr)
3266 * The expression below will return FALSE for any unsupported adapter
3267 * which will keep us "honest" in the future ...
3269 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3270 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3271 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3275 "FW image (%d) is not suitable for this adapter (%d)\n",
3276 hdr->chip, chip_id(adap));
3281 * t4_load_fw - download firmware
3282 * @adap: the adapter
3283 * @fw_data: the firmware image to write
3286 * Write the supplied firmware image to the card's serial flash.
/*
 * NOTE(review): partial listing — @size doc line, local declarations
 * (ret, addr, csum, i), error returns after each CH_ERR, and the closing
 * brace are among the missing lines. Code left byte-identical.
 */
3288 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3293 u8 first_page[SF_PAGE_SIZE];
3294 const u32 *p = (const u32 *)fw_data;
3295 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3296 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3297 unsigned int fw_start_sec;
3298 unsigned int fw_start;
3299 unsigned int fw_size;
/* a bootstrap image goes to its own dedicated flash region */
3301 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3302 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3303 fw_start = FLASH_FWBOOTSTRAP_START;
3304 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3306 fw_start_sec = FLASH_FW_START_SEC;
3307 fw_start = FLASH_FW_START;
3308 fw_size = FLASH_FW_MAX_SIZE;
/* sanity checks: non-empty, 512-byte multiple, header-consistent size */
3312 CH_ERR(adap, "FW image has no data\n");
3317 "FW image size not multiple of 512 bytes\n");
3320 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3322 "FW image size differs from size in FW header\n");
3325 if (size > fw_size) {
3326 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3330 if (!t4_fw_matches_chip(adap, hdr))
/* image-wide checksum must sum to all-ones */
3333 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3334 csum += be32_to_cpu(p[i]);
3336 if (csum != 0xffffffff) {
3338 "corrupted firmware image, checksum %#x\n", csum);
3342 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3343 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3348 * We write the correct version at the end so the driver can see a bad
3349 * version if the FW write fails. Start by writing a copy of the
3350 * first page with a bad version.
3352 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3353 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3354 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
/* write the remaining pages one flash page at a time */
3359 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3360 addr += SF_PAGE_SIZE;
3361 fw_data += SF_PAGE_SIZE;
3362 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* finally patch the real version into the header we deliberately
 * wrote as 0xffffffff above */
3367 ret = t4_write_flash(adap,
3368 fw_start + offsetof(struct fw_hdr, fw_ver),
3369 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3372 CH_ERR(adap, "firmware download failed, error %d\n",
3378 * t4_fwcache - firmware cache operation
3379 * @adap: the adapter
3380 * @op : the operation (flush or flush and invalidate)
/* NOTE(review): partial listing — the assignments to c.op_to_vfn and
 * c.param[0].mnem span the missing lines before 3388/3394. */
3382 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3384 struct fw_params_cmd c;
3386 memset(&c, 0, sizeof(c));
/* build a PARAMS write command addressed to our PF/VF */
3388 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3389 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3390 V_FW_PARAMS_CMD_PFN(adap->pf) |
3391 V_FW_PARAMS_CMD_VFN(0));
3392 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* DEV/FWCACHE parameter selects the cache operation */
3394 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3395 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3396 c.param[0].val = (__force __be32)op;
3398 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/*
 * Dump the CIM PIF logic-analyzer request and response buffers.
 * NOTE(review): partial listing — loop-variable declarations, NULL checks
 * on the wrptr out-params, and inner read-pointer advance lines are
 * among the missing lines. Code left byte-identical.
 */
3401 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3402 unsigned int *pif_req_wrptr,
3403 unsigned int *pif_rsp_wrptr)
3406 u32 cfg, val, req, rsp;
/* temporarily disable LA capture while we read it out */
3408 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3409 if (cfg & F_LADBGEN)
3410 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3412 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3413 req = G_POLADBGWRPTR(val);
3414 rsp = G_PILADBGWRPTR(val);
3416 *pif_req_wrptr = req;
3418 *pif_rsp_wrptr = rsp;
/* walk both LA buffers entry by entry via the debug read pointers */
3420 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3421 for (j = 0; j < 6; j++) {
3422 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3423 V_PILADBGRDPTR(rsp));
3424 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3425 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3429 req = (req + 2) & M_POLADBGRDPTR;
3430 rsp = (rsp + 2) & M_PILADBGRDPTR;
/* restore the original debug configuration (re-enables capture) */
3432 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Dump the CIM MA logic-analyzer request and response buffers.
 * NOTE(review): partial listing — local declarations (cfg, i, j, idx) and
 * the idx computation line are among the missing lines.
 */
3435 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
/* temporarily disable LA capture while we read it out */
3440 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3441 if (cfg & F_LADBGEN)
3442 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3444 for (i = 0; i < CIM_MALA_SIZE; i++) {
3445 for (j = 0; j < 5; j++) {
3447 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3448 V_PILADBGRDPTR(idx));
3449 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3450 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
/* restore the original debug configuration */
3453 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Read the ULP RX logic analyzer into @la_buf, interleaving the 8
 * channels (channel i occupies la_buf[i], la_buf[i+8], ...).
 * NOTE(review): partial listing — loop-variable declarations and closing
 * braces are among the missing lines.
 */
3456 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3460 for (i = 0; i < 8; i++) {
3461 u32 *p = la_buf + i;
/* select channel i, then rewind its read pointer to the write pointer */
3463 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3464 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3465 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3466 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3467 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
/* Port capabilities that may be advertised during autonegotiation:
 * every supported link speed plus the ANEG capability bit itself. */
3471 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3472 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
3473 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
3476 * t4_link_l1cfg - apply link configuration to MAC/PHY
3477 * @phy: the PHY to setup
3478 * @mac: the MAC to setup
3479 * @lc: the requested link configuration
3481 * Set up a port's MAC and PHY according to a desired link configuration.
3482 * - If the PHY can auto-negotiate first decide what to advertise, then
3483 * enable/disable auto-negotiation as desired, and reset.
3484 * - If the PHY does not auto-negotiate just reset it.
3485 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3486 * otherwise do it later based on the outcome of auto-negotiation.
/* NOTE(review): the kernel-doc above names @phy/@mac but the actual
 * parameters are (adap, mbox, port, lc); partial listing also drops the
 * c.action_to_len16 assignment line before 3505. */
3488 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3489 struct link_config *lc)
3491 struct fw_port_cmd c;
3492 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
/* translate requested pause settings into FW capability bits */
3495 if (lc->requested_fc & PAUSE_RX)
3496 fc |= FW_PORT_CAP_FC_RX;
3497 if (lc->requested_fc & PAUSE_TX)
3498 fc |= FW_PORT_CAP_FC_TX;
3500 memset(&c, 0, sizeof(c));
3501 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3502 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3503 V_FW_PORT_CMD_PORTID(port));
3505 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* no-ANEG PHY: advertise only what is supported; AN disabled: pin the
 * requested speed; otherwise let autonegotiation pick from advertising */
3508 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3509 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3511 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3512 } else if (lc->autoneg == AUTONEG_DISABLE) {
3513 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
3514 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3516 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
3518 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3522 * t4_restart_aneg - restart autonegotiation
3523 * @adap: the adapter
3524 * @mbox: mbox to use for the FW command
3525 * @port: the port id
3527 * Restarts autonegotiation for the selected port.
/* NOTE(review): partial listing — the c.action_to_len16 assignment line
 * before 3538 is missing from this extract. */
3529 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3531 struct fw_port_cmd c;
3533 memset(&c, 0, sizeof(c));
3534 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3535 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3536 V_FW_PORT_CMD_PORTID(port));
3538 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* advertising only ANEG restarts the negotiation */
3540 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3541 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Per-cause callback used by the table-driven interrupt handlers below. */
3544 typedef void (*int_handler_t)(struct adapter *adap);
/* NOTE(review): these are the fields of "struct intr_info"; the struct
 * header and closing brace are missing from this partial listing. */
3547 unsigned int mask; /* bits to check in interrupt status */
3548 const char *msg; /* message to print or NULL */
3549 short stat_idx; /* stat counter to increment or -1 */
3550 unsigned short fatal; /* whether the condition reported is fatal */
3551 int_handler_t int_handler; /* platform-specific int handler */
3555 * t4_handle_intr_status - table driven interrupt handler
3556 * @adapter: the adapter that generated the interrupt
3557 * @reg: the interrupt status register to process
3558 * @acts: table of interrupt actions
3560 * A table driven interrupt handler that applies a set of masks to an
3561 * interrupt status word and performs the corresponding actions if the
3562 * interrupts described by the mask have occurred. The actions include
3563 * optionally emitting a warning or alert message. The table is terminated
3564 * by an entry specifying mask 0. Returns the number of fatal interrupt
/* NOTE(review): partial listing — the fatal-count accumulation, the
 * 'continue' for unmatched entries, and the "return fatal;" are among
 * the missing lines. Code left byte-identical. */
3567 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3568 const struct intr_info *acts)
3571 unsigned int mask = 0;
3572 unsigned int status = t4_read_reg(adapter, reg);
3574 for ( ; acts->mask; ++acts) {
3575 if (!(status & acts->mask))
/* fatal causes alert; non-fatal causes get a rate-limited warning */
3579 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3580 status & acts->mask);
3581 } else if (acts->msg)
3582 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3583 status & acts->mask);
3584 if (acts->int_handler)
3585 acts->int_handler(adapter);
3589 if (status) /* clear processed interrupts */
3590 t4_write_reg(adapter, reg, status);
3595 * Interrupt handler for the PCIE module.
/* NOTE(review): partial listing — table terminators ({ 0 } entries),
 * the is_t4() branch selecting between the T4 and T5+ cause tables, and
 * the "if (fat)" guard before t4_fatal_err() are among the missing lines.
 * The cause tables below map PCIe interrupt bits to messages; all entries
 * are marked fatal (stat_idx -1, fatal 1). Code left byte-identical. */
3597 static void pcie_intr_handler(struct adapter *adapter)
3599 static const struct intr_info sysbus_intr_info[] = {
3600 { F_RNPP, "RXNP array parity error", -1, 1 },
3601 { F_RPCP, "RXPC array parity error", -1, 1 },
3602 { F_RCIP, "RXCIF array parity error", -1, 1 },
3603 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3604 { F_RFTP, "RXFT array parity error", -1, 1 },
3607 static const struct intr_info pcie_port_intr_info[] = {
3608 { F_TPCP, "TXPC array parity error", -1, 1 },
3609 { F_TNPP, "TXNP array parity error", -1, 1 },
3610 { F_TFTP, "TXFT array parity error", -1, 1 },
3611 { F_TCAP, "TXCA array parity error", -1, 1 },
3612 { F_TCIP, "TXCIF array parity error", -1, 1 },
3613 { F_RCAP, "RXCA array parity error", -1, 1 },
3614 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3615 { F_RDPE, "Rx data parity error", -1, 1 },
3616 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
3619 static const struct intr_info pcie_intr_info[] = {
3620 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3621 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3622 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3623 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3624 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3625 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3626 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3627 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3628 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3629 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3630 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3631 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3632 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3633 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3634 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3635 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3636 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3637 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3638 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3639 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3640 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3641 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3642 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3643 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3644 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3645 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3646 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3647 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3648 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3649 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3654 static const struct intr_info t5_pcie_intr_info[] = {
3655 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3657 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3658 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3659 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3660 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3661 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3662 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3663 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3665 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3667 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3668 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3669 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3670 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3671 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3673 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3674 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3675 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3676 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3677 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3678 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3679 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3680 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3681 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3682 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3683 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3685 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3687 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3688 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3689 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3690 { F_READRSPERR, "Outbound read error", -1,
/* T4 also processes the UTL system-bus and port status registers;
 * T5+ uses only the consolidated PCIE_INT_CAUSE table above */
3698 fat = t4_handle_intr_status(adapter,
3699 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3701 t4_handle_intr_status(adapter,
3702 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3703 pcie_port_intr_info) +
3704 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3707 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3710 t4_fatal_err(adapter);
3714 * TP interrupt handler.
3716 static void tp_intr_handler(struct adapter *adapter)
/* both TP causes are treated as fatal */
3718 static const struct intr_info tp_intr_info[] = {
3719 { 0x3fffffff, "TP parity error", -1, 1 },
3720 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3724 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3725 t4_fatal_err(adapter);
3729 * SGE interrupt handler.
/* NOTE(review): partial listing — declarations of 'v' and 'err', table
 * terminators, the else-branch structure around the chip-version split,
 * the closing braces, and the "if (v != 0)" guard before t4_fatal_err()
 * are among the missing lines. Code left byte-identical. */
3731 static void sge_intr_handler(struct adapter *adapter)
/* causes common to all chip generations */
3736 static const struct intr_info sge_intr_info[] = {
3737 { F_ERR_CPL_EXCEED_IQE_SIZE,
3738 "SGE received CPL exceeding IQE size", -1, 1 },
3739 { F_ERR_INVALID_CIDX_INC,
3740 "SGE GTS CIDX increment too large", -1, 0 },
3741 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3742 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3743 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3744 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3745 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3747 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3749 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3751 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3753 { F_ERR_ING_CTXT_PRIO,
3754 "SGE too many priority ingress contexts", -1, 0 },
3755 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3756 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* causes that exist only on T4/T5 */
3760 static const struct intr_info t4t5_sge_intr_info[] = {
3761 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3762 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3763 { F_ERR_EGR_CTXT_PRIO,
3764 "SGE too many priority egress contexts", -1, 0 },
3769 * For now, treat below interrupts as fatal so that we disable SGE and
3770 * get better debug */
3771 static const struct intr_info t6_sge_intr_info[] = {
3772 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
3773 "SGE PCIe error for a DBP thread", -1, 1 },
3775 "SGE Actual WRE packet is less than advertized length",
/* parity errors are reported through INT_CAUSE1/2 as a 64-bit word */
3780 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
3781 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
3783 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
3784 (unsigned long long)v);
3785 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
3786 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
3789 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
3790 if (chip_id(adapter) <= CHELSIO_T5)
3791 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3792 t4t5_sge_intr_info);
3794 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
/* report and clear any latched per-queue error indication */
3797 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
3798 if (err & F_ERROR_QID_VALID) {
3799 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
3800 if (err & F_UNCAPTURED_ERROR)
3801 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
3802 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
3803 F_UNCAPTURED_ERROR);
3807 t4_fatal_err(adapter);
/* Aggregate masks covering all CIM outbound/inbound queue parity bits. */
3810 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
3811 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
3812 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
3813 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
3816 * CIM interrupt handler.
/* NOTE(review): partial listing — table terminators, the declaration of
 * 'fat', and the "if (fat)" guard are among the missing lines. */
3818 static void cim_intr_handler(struct adapter *adapter)
3820 static const struct intr_info cim_intr_info[] = {
3821 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
3822 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3823 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3824 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
3825 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
3826 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
3827 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
/* uP-access violations (illegal reads/writes to protected spaces) */
3830 static const struct intr_info cim_upintr_info[] = {
3831 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
3832 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
3833 { F_ILLWRINT, "CIM illegal write", -1, 1 },
3834 { F_ILLRDINT, "CIM illegal read", -1, 1 },
3835 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
3836 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
3837 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
3838 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
3839 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
3840 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
3841 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
3842 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
3843 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
3844 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
3845 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
3846 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
3847 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
3848 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
3849 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
3850 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
3851 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
3852 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
3853 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
3854 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
3855 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
3856 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
3857 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
3858 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* a firmware-reported error takes precedence; report it first */
3863 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
3864 t4_report_fw_error(adapter);
3866 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
3868 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
3871 t4_fatal_err(adapter);
3875 * ULP RX interrupt handler.
3877 static void ulprx_intr_handler(struct adapter *adapter)
/* all ULP-RX causes are fatal */
3879 static const struct intr_info ulprx_intr_info[] = {
3880 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
3881 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
3882 { 0x7fffff, "ULPRX parity error", -1, 1 },
3886 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
3887 t4_fatal_err(adapter);
3891 * ULP TX interrupt handler.
3893 static void ulptx_intr_handler(struct adapter *adapter)
/* all ULP-TX causes are fatal */
3895 static const struct intr_info ulptx_intr_info[] = {
3896 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
3898 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
3900 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
3902 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
3904 { 0xfffffff, "ULPTX parity error", -1, 1 },
3908 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
3909 t4_fatal_err(adapter);
3913 * PM TX interrupt handler.
3915 static void pmtx_intr_handler(struct adapter *adapter)
/* all PM-TX causes are fatal */
3917 static const struct intr_info pmtx_intr_info[] = {
3918 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
3919 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
3920 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
3921 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
3922 { 0xffffff0, "PMTX framing error", -1, 1 },
3923 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
3924 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
3926 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
3927 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
3931 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
3932 t4_fatal_err(adapter);
3936 * PM RX interrupt handler.
3938 static void pmrx_intr_handler(struct adapter *adapter)
/* all PM-RX causes are fatal */
3940 static const struct intr_info pmrx_intr_info[] = {
3941 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
3942 { 0x3ffff0, "PMRX framing error", -1, 1 },
3943 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
3944 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
3946 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
3947 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
3951 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
3952 t4_fatal_err(adapter);
3956 * CPL switch interrupt handler.
3958 static void cplsw_intr_handler(struct adapter *adapter)
/* all CPL-switch causes are fatal */
3960 static const struct intr_info cplsw_intr_info[] = {
3961 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
3962 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
3963 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
3964 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
3965 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
3966 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
3970 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
3971 t4_fatal_err(adapter);
3975 * LE interrupt handler.
/* NOTE(review): partial listing — table terminators and the
 * t4_fatal_err() call on a nonzero result are among the missing lines.
 * T6 moved/renamed the LE cause bits, hence the second table. */
3977 static void le_intr_handler(struct adapter *adap)
3979 unsigned int chip_ver = chip_id(adap);
3980 static const struct intr_info le_intr_info[] = {
3981 { F_LIPMISS, "LE LIP miss", -1, 0 },
3982 { F_LIP0, "LE 0 LIP error", -1, 0 },
3983 { F_PARITYERR, "LE parity error", -1, 1 },
3984 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
3985 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
3989 static const struct intr_info t6_le_intr_info[] = {
3990 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
3991 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
3992 { F_TCAMINTPERR, "LE parity error", -1, 1 },
3993 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
3994 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
3998 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
3999 (chip_ver <= CHELSIO_T5) ?
4000 le_intr_info : t6_le_intr_info))
4005 * MPS interrupt handler.
/*
 * MPS interrupt handler.  MPS has several sub-modules (Rx/Tx paths, TRC
 * tracing, statistics SRAM/FIFOs, classification); each has its own
 * cause register and intr_info table.  The per-module results are summed
 * into `fat` and a nonzero total is fatal.
 */
4007 static void mps_intr_handler(struct adapter *adapter)
4009 static const struct intr_info mps_rx_intr_info[] = {
4010 { 0xffffff, "MPS Rx parity error", -1, 1 },
4013 static const struct intr_info mps_tx_intr_info[] = {
4014 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4015 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4016 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4018 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4020 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4021 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4022 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4025 static const struct intr_info mps_trc_intr_info[] = {
4026 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4027 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4029 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4032 static const struct intr_info mps_stat_sram_intr_info[] = {
4033 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4036 static const struct intr_info mps_stat_tx_intr_info[] = {
4037 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4040 static const struct intr_info mps_stat_rx_intr_info[] = {
4041 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4044 static const struct intr_info mps_cls_intr_info[] = {
4045 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4046 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4047 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Accumulate fatal counts from each MPS sub-module's cause register. */
4053 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4055 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4057 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4058 mps_trc_intr_info) +
4059 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4060 mps_stat_sram_intr_info) +
4061 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4062 mps_stat_tx_intr_info) +
4063 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4064 mps_stat_rx_intr_info) +
4065 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
/* Clear the top-level MPS cause and flush the write with a read-back. */
4068 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4069 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4071 t4_fatal_err(adapter);
4074 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4078 * EDC/MC interrupt handler.
/*
 * EDC/MC memory-controller interrupt handler.  @idx selects which memory
 * (EDC0, EDC1, MC/MC0, MC1); the matching cause and ECC-status register
 * addresses are computed first, then parity/ECC events are logged.
 * Correctable ECC errors are counted and rate-limit-warned; parity and
 * uncorrectable ECC errors are fatal.
 */
4080 static void mem_intr_handler(struct adapter *adapter, int idx)
4082 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4084 unsigned int addr, cnt_addr, v;
4086 if (idx <= MEM_EDC1) {
4087 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4088 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4089 } else if (idx == MEM_MC) {
4090 if (is_t4(adapter)) {
4091 addr = A_MC_INT_CAUSE;
4092 cnt_addr = A_MC_ECC_STATUS;
/* Non-T4 chips use the A_MC_P_* register block instead. */
4094 addr = A_MC_P_INT_CAUSE;
4095 cnt_addr = A_MC_P_ECC_STATUS;
4098 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4099 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4102 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4103 if (v & F_PERR_INT_CAUSE)
4104 CH_ALERT(adapter, "%s FIFO parity error\n",
4106 if (v & F_ECC_CE_INT_CAUSE) {
4107 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4109 t4_edc_err_read(adapter, idx);
/* Writing the max count value resets the CE counter field. */
4111 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4112 CH_WARN_RATELIMIT(adapter,
4113 "%u %s correctable ECC data error%s\n",
4114 cnt, name[idx], cnt > 1 ? "s" : "");
4116 if (v & F_ECC_UE_INT_CAUSE)
4118 "%s uncorrectable ECC data error\n", name[idx]);
/* Acknowledge the handled causes, then escalate fatal ones. */
4120 t4_write_reg(adapter, addr, v);
4121 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4122 t4_fatal_err(adapter);
4126 * MA interrupt handler.
/*
 * MA (Memory Arbiter) interrupt handler.  Reports parity errors (with
 * the parity status registers) and address wrap-around errors, clears
 * the cause register, and treats any MA interrupt as fatal.
 */
4128 static void ma_intr_handler(struct adapter *adapter)
4130 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4132 if (status & F_MEM_PERR_INT_CAUSE) {
4134 "MA parity error, parity status %#x\n",
4135 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4138 "MA parity error, parity status %#x\n",
4139 t4_read_reg(adapter,
4140 A_MA_PARITY_ERROR_STATUS2));
4142 if (status & F_MEM_WRAP_INT_CAUSE) {
4143 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4144 CH_ALERT(adapter, "MA address wrap-around error by "
4145 "client %u to address %#x\n",
4146 G_MEM_WRAP_CLIENT_NUM(v),
4147 G_MEM_WRAP_ADDRESS(v) << 4);
4149 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4150 t4_fatal_err(adapter);
4154 * SMB interrupt handler.
/*
 * SMBus interrupt handler: decode FIFO parity errors via the shared
 * intr_info walker.
 */
4156 static void smb_intr_handler(struct adapter *adap)
4158 static const struct intr_info smb_intr_info[] = {
4159 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4160 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4161 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4165 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4170 * NC-SI interrupt handler.
/*
 * NC-SI interrupt handler: decode CIM/MPS/FIFO parity errors via the
 * shared intr_info walker.
 */
4172 static void ncsi_intr_handler(struct adapter *adap)
4174 static const struct intr_info ncsi_intr_info[] = {
4175 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4176 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4177 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4178 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4182 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4187 * XGMAC interrupt handler.
/*
 * Per-port MAC interrupt handler.  The cause register address differs by
 * chip generation (PORT_REG vs T5_PORT_REG); only the Tx/Rx FIFO parity
 * bits are examined, logged, and written back to acknowledge.
 */
4189 static void xgmac_intr_handler(struct adapter *adap, int port)
4191 u32 v, int_cause_reg;
4194 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4196 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4198 v = t4_read_reg(adap, int_cause_reg);
4200 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4204 if (v & F_TXFIFO_PRTY_ERR)
4205 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4207 if (v & F_RXFIFO_PRTY_ERR)
4208 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4210 t4_write_reg(adap, int_cause_reg, v);
4215 * PL interrupt handler.
/*
 * PL interrupt handler.  T4 additionally reports VFID_MAP parity errors;
 * the chip check at the call site selects the matching table.
 */
4217 static void pl_intr_handler(struct adapter *adap)
4219 static const struct intr_info pl_intr_info[] = {
4220 { F_FATALPERR, "Fatal parity error", -1, 1 },
4221 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4225 static const struct intr_info t5_pl_intr_info[] = {
4226 { F_FATALPERR, "Fatal parity error", -1, 1 },
4230 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4232 pl_intr_info : t5_pl_intr_info))
4236 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4239 * t4_slow_intr_handler - control path interrupt handler
4240 * @adapter: the adapter
4242 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4243 * The designation 'slow' is because it involves register reads, while
4244 * data interrupts typically don't involve any MMIOs.
/*
 * Top-level "slow" interrupt dispatcher: read PL_INT_CAUSE once, bail if
 * none of the global causes are set, then fan out to the per-module
 * handlers for each asserted cause bit.  Finishes by clearing the global
 * causes this function owns and flushing the write with a read-back.
 */
4246 int t4_slow_intr_handler(struct adapter *adapter)
4248 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4250 if (!(cause & GLBL_INTR_MASK))
4253 cim_intr_handler(adapter);
4255 mps_intr_handler(adapter);
4257 ncsi_intr_handler(adapter);
4259 pl_intr_handler(adapter);
4261 smb_intr_handler(adapter);
4263 xgmac_intr_handler(adapter, 0);
4265 xgmac_intr_handler(adapter, 1);
4267 xgmac_intr_handler(adapter, 2);
4269 xgmac_intr_handler(adapter, 3);
4271 pcie_intr_handler(adapter);
4273 mem_intr_handler(adapter, MEM_MC);
/* MC1 exists only on T5 — guard before dispatching its handler. */
4274 if (is_t5(adapter) && (cause & F_MC1))
4275 mem_intr_handler(adapter, MEM_MC1);
4277 mem_intr_handler(adapter, MEM_EDC0);
4279 mem_intr_handler(adapter, MEM_EDC1);
4281 le_intr_handler(adapter);
4283 tp_intr_handler(adapter);
4285 ma_intr_handler(adapter);
4286 if (cause & F_PM_TX)
4287 pmtx_intr_handler(adapter);
4288 if (cause & F_PM_RX)
4289 pmrx_intr_handler(adapter);
4290 if (cause & F_ULP_RX)
4291 ulprx_intr_handler(adapter);
4292 if (cause & F_CPL_SWITCH)
4293 cplsw_intr_handler(adapter);
4295 sge_intr_handler(adapter);
4296 if (cause & F_ULP_TX)
4297 ulptx_intr_handler(adapter);
4299 /* Clear the interrupts just processed for which we are the master. */
4300 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4301 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4306 * t4_intr_enable - enable interrupts
4307 * @adapter: the adapter whose interrupts should be enabled
4309 * Enable PF-specific interrupts for the calling function and the top-level
4310 * interrupt concentrator for global interrupts. Interrupts are already
4311 * enabled at each module, here we just enable the roots of the interrupt
4314 * Note: this function should be called only when the driver manages
4315 * non PF-specific interrupts from the various HW modules. Only one PCI
4316 * function at a time should be doing this.
/*
 * Enable the interrupt roots for the calling PF: program the SGE enable
 * mask (the chip-specific extra bits go in `val`), enable this PF's
 * PL_PF_INT sources, and set this PF's bit in PL_INT_MAP0.  The PF
 * number is decoded from PL_WHOAMI with the chip-appropriate field.
 */
4318 void t4_intr_enable(struct adapter *adapter)
4321 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4322 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4323 ? G_SOURCEPF(whoami)
4324 : G_T6_SOURCEPF(whoami));
/* SGE error-enable bits differ between T4/T5 and T6. */
4326 if (chip_id(adapter) <= CHELSIO_T5)
4327 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4329 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4330 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4331 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4332 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4333 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4334 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4335 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4336 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4337 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4338 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4342 * t4_intr_disable - disable interrupts
4343 * @adapter: the adapter whose interrupts should be disabled
4345 * Disable interrupts. We only disable the top-level interrupt
4346 * concentrators. The caller must be a PCI function managing global
/*
 * Disable this PF's interrupt roots: zero the PF interrupt enables and
 * clear this PF's bit in PL_INT_MAP0 (the mirror of t4_intr_enable()).
 */
4349 void t4_intr_disable(struct adapter *adapter)
4351 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4352 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4353 ? G_SOURCEPF(whoami)
4354 : G_T6_SOURCEPF(whoami));
4356 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4357 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4361 * t4_intr_clear - clear all interrupts
4362 * @adapter: the adapter whose interrupts should be cleared
4364 * Clears all interrupts. The caller must be a PCI function managing
4365 * global interrupts.
/*
 * Clear all pending interrupt causes by writing all-ones to every cause
 * register in the table, plus chip-dependent registers (T4-only PCIe UTL
 * status; MC vs MC_P cause), then clear the global PL causes and flush.
 */
4367 void t4_intr_clear(struct adapter *adapter)
4369 static const unsigned int cause_reg[] = {
4370 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4371 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4372 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4373 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4374 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4375 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4377 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4378 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4379 A_MPS_RX_PERR_INT_CAUSE,
4381 MYPF_REG(A_PL_PF_INT_CAUSE),
4388 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4389 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
/* MC cause register location depends on chip generation. */
4391 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4392 A_MC_P_INT_CAUSE, 0xffffffff);
4394 if (is_t4(adapter)) {
4395 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4397 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4400 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4402 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4403 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4407 * hash_mac_addr - return the hash value of a MAC address
4408 * @addr: the 48-bit Ethernet MAC address
4410 * Hashes a MAC address according to the hash function used by HW inexact
4411 * (hash) address matching.
/*
 * Pack the 48-bit MAC address into two 24-bit words for the HW hash
 * computation.  NOTE(review): the fold/return lines are not visible in
 * this listing.
 */
4413 static int hash_mac_addr(const u8 *addr)
4415 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4416 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4424 * t4_config_rss_range - configure a portion of the RSS mapping table
4425 * @adapter: the adapter
4426 * @mbox: mbox to use for the FW command
4427 * @viid: virtual interface whose RSS subtable is to be written
4428 * @start: start entry in the table to write
4429 * @n: how many table entries to write
4430 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4431 * @nrspq: number of values in @rspq
4433 * Programs the selected part of the VI's RSS mapping table with the
4434 * provided values. If @nrspq < @n the supplied values are used repeatedly
4435 * until the full table range is populated.
4437 * The caller must ensure the values in @rspq are in the range allowed for
/*
 * Program a range of the VI's RSS indirection table via FW_RSS_IND_TBL
 * mailbox commands.  Each command carries up to 32 queue IDs packed
 * three per 32-bit word; @rspq is consumed circularly (rsp wraps back to
 * rspq) so nrspq values can fill n entries.
 * NOTE(review): loop braces and several statements are not visible in
 * this listing.
 */
4440 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4441 int start, int n, const u16 *rspq, unsigned int nrspq)
4444 const u16 *rsp = rspq;
4445 const u16 *rsp_end = rspq + nrspq;
4446 struct fw_rss_ind_tbl_cmd cmd;
4448 memset(&cmd, 0, sizeof(cmd));
4449 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4450 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4451 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4452 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4455 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4456 * Queue Identifiers. These Ingress Queue IDs are packed three to
4457 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4461 int nq = min(n, 32);
4463 __be32 *qp = &cmd.iq0_to_iq2;
4466 * Set up the firmware RSS command header to send the next
4467 * "nq" Ingress Queue IDs to the firmware.
4469 cmd.niqid = cpu_to_be16(nq);
4470 cmd.startidx = cpu_to_be16(start);
4473 * "nq" more done for the start of the next loop.
4479 * While there are still Ingress Queue IDs to stuff into the
4480 * current firmware RSS command, retrieve them from the
4481 * Ingress Queue ID array and insert them into the command.
4485 * Grab up to the next 3 Ingress Queue IDs (wrapping
4486 * around the Ingress Queue ID array if necessary) and
4487 * insert them into the firmware RSS command at the
4488 * current 3-tuple position within the commad.
4492 int nqbuf = min(3, nq);
4495 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4496 while (nqbuf && nq_packed < 32) {
4503 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4504 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4505 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4509 * Send this portion of the RRS table update to the firmware;
4510 * bail out on any errors.
4512 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4520 * t4_config_glbl_rss - configure the global RSS mode
4521 * @adapter: the adapter
4522 * @mbox: mbox to use for the FW command
4523 * @mode: global RSS mode
4524 * @flags: mode-specific flags
4526 * Sets the global RSS mode.
/*
 * Set the global RSS mode through a FW_RSS_GLB_CONFIG mailbox command.
 * Only MANUAL and BASICVIRTUAL modes are encoded; BASICVIRTUAL also
 * carries the caller-supplied flags.
 */
4528 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4531 struct fw_rss_glb_config_cmd c;
4533 memset(&c, 0, sizeof(c));
4534 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4535 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4536 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4537 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4538 c.u.manual.mode_pkd =
4539 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4540 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4541 c.u.basicvirtual.mode_pkd =
4542 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4543 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4546 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4550 * t4_config_vi_rss - configure per VI RSS settings
4551 * @adapter: the adapter
4552 * @mbox: mbox to use for the FW command
4555 * @defq: id of the default RSS queue for the VI.
4557 * Configures VI-specific RSS properties.
/*
 * Configure per-VI RSS: send a FW_RSS_VI_CONFIG mailbox command carrying
 * @flags plus the default queue id for the VI.
 */
4559 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4560 unsigned int flags, unsigned int defq)
4562 struct fw_rss_vi_config_cmd c;
4564 memset(&c, 0, sizeof(c));
4565 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4566 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4567 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4568 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4569 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4570 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4571 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4574 /* Read an RSS table row */
/* Read one RSS LKP table row: trigger the lookup, then poll for the
 * row-valid bit and return the row value through t4_wait_op_done_val().
 */
4575 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4577 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4578 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4583 * t4_read_rss - read the contents of the RSS mapping table
4584 * @adapter: the adapter
4585 * @map: holds the contents of the RSS mapping table
4587 * Reads the contents of the RSS hash->queue mapping table.
/*
 * Read the full RSS mapping table.  Each rd_rss_row() result packs two
 * queue entries, hence the loop over RSS_NENTRIES / 2.
 */
4589 int t4_read_rss(struct adapter *adapter, u16 *map)
4594 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4595 ret = rd_rss_row(adapter, i, &val);
4598 *map++ = G_LKPTBLQUEUE0(val);
4599 *map++ = G_LKPTBLQUEUE1(val);
4605 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4606 * @adap: the adapter
4607 * @vals: where the indirect register values are stored/written
4608 * @nregs: how many indirect registers to read/write
4609 * @start_idx: index of first indirect register to read/write
4610 * @rw: Read (1) or Write (0)
4612 * Access TP PIO registers through LDST
/*
 * Access TP PIO registers one at a time through FW_LDST mailbox
 * commands.  @rw selects read (1, result stored into vals[i]) or
 * write (0, vals[i] sent to the firmware).
 */
4614 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4615 unsigned int start_index, unsigned int rw)
4618 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4619 struct fw_ldst_cmd c;
4621 for (i = 0 ; i < nregs; i++) {
4622 memset(&c, 0, sizeof(c));
4623 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4625 (rw ? F_FW_CMD_READ :
4627 V_FW_LDST_CMD_ADDRSPACE(cmd));
4628 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4630 c.u.addrval.addr = cpu_to_be32(start_index + i);
4631 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4632 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4635 vals[i] = be32_to_cpu(c.u.addrval.val);
4641 * t4_read_rss_key - read the global RSS key
4642 * @adap: the adapter
4643 * @key: 10-entry array holding the 320-bit RSS key
4645 * Reads the global 320-bit RSS key.
/*
 * Read the 320-bit global RSS key (10 u32 words), via firmware LDST when
 * available, else via direct TP_PIO indirect access.
 */
4647 void t4_read_rss_key(struct adapter *adap, u32 *key)
4649 if (t4_use_ldst(adap))
4650 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1)
4652 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4653 A_TP_RSS_SECRET_KEY0);
4657 * t4_write_rss_key - program one of the RSS keys
4658 * @adap: the adapter
4659 * @key: 10-entry array holding the 320-bit RSS key
4660 * @idx: which RSS key to write
4662 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4663 * 0..15 the corresponding entry in the RSS key table is written,
4664 * otherwise the global RSS key is written.
/*
 * Write one of the RSS keys.  The key data goes through LDST or TP_PIO
 * indirect writes; valid @idx values then latch the key into the key
 * table through TP_RSS_CONFIG_VRT.  T6 KeyMode 3 extends the addressable
 * key count from 16 to 32.
 */
4666 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4668 u8 rss_key_addr_cnt = 16;
4669 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4672 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4673 * allows access to key addresses 16-63 by using KeyWrAddrX
4674 * as index[5:4](upper 2) into key table
4676 if ((chip_id(adap) > CHELSIO_T5) &&
4677 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4678 rss_key_addr_cnt = 32;
4680 if (t4_use_ldst(adap))
4681 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4683 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4684 A_TP_RSS_SECRET_KEY0);
4686 if (idx >= 0 && idx < rss_key_addr_cnt) {
/* Extended addressing splits idx across KEYWRADDRX / T6_VFWRADDR. */
4687 if (rss_key_addr_cnt > 16)
4688 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4689 V_KEYWRADDRX(idx >> 4) |
4690 V_T6_VFWRADDR(idx) | F_KEYWREN);
4692 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4693 V_KEYWRADDR(idx) | F_KEYWREN);
4698 * t4_read_rss_pf_config - read PF RSS Configuration Table
4699 * @adapter: the adapter
4700 * @index: the entry in the PF RSS table to read
4701 * @valp: where to store the returned value
4703 * Reads the PF RSS Configuration Table at the specified index and returns
4704 * the value found there.
/*
 * Read one PF RSS Configuration Table entry into *valp, via LDST or
 * TP_PIO indirect access.
 */
4706 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4709 if (t4_use_ldst(adapter))
4710 t4_fw_tp_pio_rw(adapter, valp, 1,
4711 A_TP_RSS_PF0_CONFIG + index, 1);
4713 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4714 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4718 * t4_write_rss_pf_config - write PF RSS Configuration Table
4719 * @adapter: the adapter
4720 * @index: the entry in the VF RSS table to read
4721 * @val: the value to store
4723 * Writes the PF RSS Configuration Table at the specified index with the
/*
 * Write one PF RSS Configuration Table entry, via LDST or TP_PIO
 * indirect access (mirror of t4_read_rss_pf_config()).
 */
4726 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4729 if (t4_use_ldst(adapter))
4730 t4_fw_tp_pio_rw(adapter, &val, 1,
4731 A_TP_RSS_PF0_CONFIG + index, 0);
4733 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4734 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4738 * t4_read_rss_vf_config - read VF RSS Configuration Table
4739 * @adapter: the adapter
4740 * @index: the entry in the VF RSS table to read
4741 * @vfl: where to store the returned VFL
4742 * @vfh: where to store the returned VFH
4744 * Reads the VF RSS Configuration Table at the specified index and returns
4745 * the (VFL, VFH) values found there.
/*
 * Read the (VFL, VFH) pair for one VF RSS table entry.  The VF write
 * address field moved in T6 (VFWRADDR vs T6_VFWRADDR), so mask/data are
 * chosen per chip before requesting the read through TP_RSS_CONFIG_VRT.
 */
4747 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4750 u32 vrt, mask, data;
4752 if (chip_id(adapter) <= CHELSIO_T5) {
4753 mask = V_VFWRADDR(M_VFWRADDR);
4754 data = V_VFWRADDR(index);
4756 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4757 data = V_T6_VFWRADDR(index);
4760 * Request that the index'th VF Table values be read into VFL/VFH.
4762 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4763 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4764 vrt |= data | F_VFRDEN;
4765 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4768 * Grab the VFL/VFH values ...
4770 if (t4_use_ldst(adapter)) {
4771 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
4772 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
4774 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4775 vfl, 1, A_TP_RSS_VFL_CONFIG);
4776 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4777 vfh, 1, A_TP_RSS_VFH_CONFIG);
4782 * t4_write_rss_vf_config - write VF RSS Configuration Table
4784 * @adapter: the adapter
4785 * @index: the entry in the VF RSS table to write
4786 * @vfl: the VFL to store
4787 * @vfh: the VFH to store
4789 * Writes the VF RSS Configuration Table at the specified index with the
4790 * specified (VFL, VFH) values.
/*
 * Write the (VFL, VFH) pair for one VF RSS table entry: load VFL/VFH
 * first (LDST or TP_PIO indirect), then commit them into the index'th
 * slot via TP_RSS_CONFIG_VRT.  Chip-dependent VF address field as in
 * t4_read_rss_vf_config().
 */
4792 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
4795 u32 vrt, mask, data;
4797 if (chip_id(adapter) <= CHELSIO_T5) {
4798 mask = V_VFWRADDR(M_VFWRADDR);
4799 data = V_VFWRADDR(index);
4801 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4802 data = V_T6_VFWRADDR(index);
4806 * Load up VFL/VFH with the values to be written ...
4808 if (t4_use_ldst(adapter)) {
4809 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
4810 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
4812 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4813 &vfl, 1, A_TP_RSS_VFL_CONFIG);
4814 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4815 &vfh, 1, A_TP_RSS_VFH_CONFIG);
4819 * Write the VFL/VFH into the VF Table at index'th location.
4821 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4822 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4823 vrt |= data | F_VFRDEN;
4824 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4828 * t4_read_rss_pf_map - read PF RSS Map
4829 * @adapter: the adapter
4831 * Reads the PF RSS Map register and returns its value.
/* Read the PF RSS Map register (LDST or TP_PIO indirect) and return it. */
4833 u32 t4_read_rss_pf_map(struct adapter *adapter)
4837 if (t4_use_ldst(adapter))
4838 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
4840 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4841 &pfmap, 1, A_TP_RSS_PF_MAP);
4846 * t4_write_rss_pf_map - write PF RSS Map
4847 * @adapter: the adapter
4848 * @pfmap: PF RSS Map value
4850 * Writes the specified value to the PF RSS Map register.
/* Write the PF RSS Map register (LDST or TP_PIO indirect). */
4852 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
4854 if (t4_use_ldst(adapter))
4855 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
4857 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4858 &pfmap, 1, A_TP_RSS_PF_MAP);
4862 * t4_read_rss_pf_mask - read PF RSS Mask
4863 * @adapter: the adapter
4865 * Reads the PF RSS Mask register and returns its value.
/* Read the PF RSS Mask register (LDST or TP_PIO indirect) and return it. */
4867 u32 t4_read_rss_pf_mask(struct adapter *adapter)
4871 if (t4_use_ldst(adapter))
4872 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
4874 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4875 &pfmask, 1, A_TP_RSS_PF_MSK);
4880 * t4_write_rss_pf_mask - write PF RSS Mask
4881 * @adapter: the adapter
4882 * @pfmask: PF RSS Mask value
4884 * Writes the specified value to the PF RSS Mask register.
/* Write the PF RSS Mask register (LDST or TP_PIO indirect). */
4886 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
4888 if (t4_use_ldst(adapter))
4889 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
4891 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4892 &pfmask, 1, A_TP_RSS_PF_MSK);
4896 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
4897 * @adap: the adapter
4898 * @v4: holds the TCP/IP counter values
4899 * @v6: holds the TCP/IPv6 counter values
4901 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
4902 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
/*
 * Read TP's TCP MIB counters into @v4 and/or @v6 (either may be NULL —
 * the surrounding guards are not visible in this listing).  The STAT*
 * macros index into a window of TP_MIB registers read in one burst; the
 * HI/LO pairs are recombined into 64-bit counters.
 */
4904 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
4905 struct tp_tcp_stats *v6)
4907 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
4909 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
4910 #define STAT(x) val[STAT_IDX(x)]
4911 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
4914 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4915 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
4916 v4->tcp_out_rsts = STAT(OUT_RST);
4917 v4->tcp_in_segs = STAT64(IN_SEG);
4918 v4->tcp_out_segs = STAT64(OUT_SEG);
4919 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Same window layout starting at the IPv6 base register. */
4922 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
4923 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
4924 v6->tcp_out_rsts = STAT(OUT_RST);
4925 v6->tcp_in_segs = STAT64(IN_SEG);
4926 v6->tcp_out_segs = STAT64(OUT_SEG);
4927 v6->tcp_retrans_segs = STAT64(RXT_SEG);
4935 * t4_tp_get_err_stats - read TP's error MIB counters
4936 * @adap: the adapter
4937 * @st: holds the counter values
4939 * Returns the values of TP's error counters.
/*
 * Read TP's per-channel error MIB counters into @st.  Each counter group
 * is a burst of `nchan` registers; the trailing read fetches the two
 * ARP/neighbour drop counters.
 */
4941 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
4943 int nchan = adap->chip_params->nchan;
4945 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4946 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
4947 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4948 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
4949 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4950 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
4951 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4952 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
4953 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4954 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
4955 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4956 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
4957 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4958 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
4959 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4960 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
4962 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
4963 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
4967 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
4968 * @adap: the adapter
4969 * @st: holds the counter values
4971 * Returns the values of TP's proxy counters.
/* Read TP's per-channel proxy (loopback) MIB counters into @st. */
4973 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
4975 int nchan = adap->chip_params->nchan;
4977 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
4978 nchan, A_TP_MIB_TNL_LPBK_0);
4982 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
4983 * @adap: the adapter
4984 * @st: holds the counter values
4986 * Returns the values of TP's CPL counters.
/* Read TP's per-channel CPL request/response MIB counters into @st. */
4988 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
4990 int nchan = adap->chip_params->nchan;
4992 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
4993 nchan, A_TP_MIB_CPL_IN_REQ_0);
4994 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
4995 nchan, A_TP_MIB_CPL_OUT_RSP_0);
4999 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5000 * @adap: the adapter
5001 * @st: holds the counter values
5003 * Returns the values of TP's RDMA counters.
/* Read TP's two RDMA MIB counters (starting at RQE_DFR_PKT) into @st. */
5005 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5007 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5008 2, A_TP_MIB_RQE_DFR_PKT);
5012 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5013 * @adap: the adapter
5014 * @idx: the port index
5015 * @st: holds the counter values
5017 * Returns the values of TP's FCoE counters for the selected port.
/*
 * Read TP's FCoE MIB counters for port @idx: DDP frames, dropped frames,
 * and the 64-bit byte counter recombined from its HI/LO register pair.
 */
5019 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5020 struct tp_fcoe_stats *st)
5024 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5025 1, A_TP_MIB_FCOE_DDP_0 + idx);
5026 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5027 1, A_TP_MIB_FCOE_DROP_0 + idx);
5028 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5029 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
5030 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5034 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5035 * @adap: the adapter
5036 * @st: holds the counter values
5038 * Returns the values of TP's counters for non-TCP directly-placed packets.
/*
 * Read TP's non-TCP DDP (USM) counters: four registers where val[0] is
 * the frame count and val[2]/val[3] form the 64-bit octet count.
 */
5040 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5044 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5046 st->frames = val[0];
5048 st->octets = ((u64)val[2] << 32) | val[3];
5052 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5053 * @adap: the adapter
5054 * @mtus: where to store the MTU values
5055 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5057 * Reads the HW path MTU table.
/*
 * Read the HW path-MTU table.  Writing MTUINDEX=0xff with the entry
 * index selects the row; the read-back yields the MTU value and (when
 * @mtu_log is non-NULL) its base-2 log width.
 */
5059 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5064 for (i = 0; i < NMTUS; ++i) {
5065 t4_write_reg(adap, A_TP_MTU_TABLE,
5066 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5067 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5068 mtus[i] = G_MTUVALUE(v);
5070 mtu_log[i] = G_MTUWIDTH(v);
5075 * t4_read_cong_tbl - reads the congestion control table
5076 * @adap: the adapter
5077 * @incr: where to store the alpha values
5079 * Reads the additive increments programmed into the HW congestion
/*
 * Read the congestion control table: for each (mtu, window) cell, write
 * the row selector into TP_CCTRL_TABLE and read back the 13-bit additive
 * increment.
 */
5082 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5084 unsigned int mtu, w;
5086 for (mtu = 0; mtu < NMTUS; ++mtu)
5087 for (w = 0; w < NCCTRL_WIN; ++w) {
5088 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5089 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5090 incr[mtu][w] = (u16)t4_read_reg(adap,
5091 A_TP_CCTRL_TABLE) & 0x1fff;
5096 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5097 * @adap: the adapter
5098 * @addr: the indirect TP register address
5099 * @mask: specifies the field within the register to modify
5100 * @val: new value for the field
5102 * Sets a field of an indirect TP register to the given value.
/*
 * Read-modify-write a field of an indirect TP register: select @addr via
 * TP_PIO_ADDR, merge @val into the bits covered by @mask, write back.
 */
5104 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5105 unsigned int mask, unsigned int val)
5107 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5108 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5109 t4_write_reg(adap, A_TP_PIO_DATA, val);
5113 * init_cong_ctrl - initialize congestion control parameters
5114 * @a: the alpha values for congestion control
5115 * @b: the beta values for congestion control
5117 * Initialize the congestion control parameters.
/*
 * Fill the default congestion-control alpha (@a) and beta (@b) tables.
 * NOTE(review): most assignment lines (indices 9-12 of a[], remaining
 * b[] entries) are not visible in this listing.
 */
5119 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5121 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5146 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5149 b[13] = b[14] = b[15] = b[16] = 3;
5150 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5151 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5156 /* The minimum additive increment value for the congestion control table */
5157 #define CC_MIN_INCR 2U
5160 * t4_load_mtus - write the MTU and congestion control HW tables
5161 * @adap: the adapter
5162 * @mtus: the values for the MTU table
5163 * @alpha: the values for the congestion control alpha parameter
5164 * @beta: the values for the congestion control beta parameter
5166 * Write the HW MTU table with the supplied MTUs and the high-speed
5167 * congestion control table with the supplied alpha, beta, and MTUs.
5168 * We write the two tables together because the additive increments
5169 * depend on the MTUs.
5171 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5172 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion window, indexed by window. */
5174 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5175 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5176 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5177 28672, 40960, 57344, 81920, 114688, 163840, 229376
5182 for (i = 0; i < NMTUS; ++i) {
5183 unsigned int mtu = mtus[i];
5184 unsigned int log2 = fls(mtu);
/* Round the encoded width up when the MTU is close to the next power of 2. */
5186 if (!(mtu & ((1 << log2) >> 2))) /* round */
5188 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5189 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5191 for (w = 0; w < NCCTRL_WIN; ++w) {
/*
 * Additive increment scaled by payload (mtu - 40 assumes a 40-byte
 * TCP/IP header); floored at CC_MIN_INCR by the (elided) max() arg.
 */
5194 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5197 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5198 (w << 16) | (beta[w] << 13) | inc);
5204 * t4_set_pace_tbl - set the pace table
5205 * @adap: the adapter
5206 * @pace_vals: the pace values in microseconds
5207 * @start: index of the first entry in the HW pace table to set
5208 * @n: how many entries to set
5210 * Sets (a subset of the) HW pace table.
5212 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5213 unsigned int start, unsigned int n)
5215 unsigned int vals[NTX_SCHED], i;
/* Nanoseconds per delayed-ACK tick, derived from 1000 us. */
5216 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5221 /* convert values from us to dack ticks, rounding to closest value */
5222 for (i = 0; i < n; i++, pace_vals++) {
5223 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
/* 0x7ff is the max encodable tick count; a non-zero input must not
 * round down to zero (error returns for both cases are elided here). */
5224 if (vals[i] > 0x7ff)
5226 if (*pace_vals && vals[i] == 0)
/* Each write carries the table index in bits 16+ and the value below. */
5229 for (i = 0; i < n; i++, start++)
5230 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5235 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5236 * @adap: the adapter
5237 * @kbps: target rate in Kbps
5238 * @sched: the scheduler index
5240 * Configure a Tx HW scheduler for the target rate.
5242 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5244 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5245 unsigned int clk = adap->params.vpd.cclk * 1000;
5246 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Kbps * 1000 / 8 == Kbps * 125: convert target rate to bytes/s. */
5249 kbps *= 125; /* -> bytes */
/*
 * Exhaustively search clocks-per-tick (cpt) / bytes-per-tick (bpt)
 * pairs for the combination whose achieved rate is closest to the
 * target; tracking of the best pair and early exit are elided here.
 */
5250 for (cpt = 1; cpt <= 255; cpt++) {
5252 bpt = (kbps + tps / 2) / tps;
5253 if (bpt > 0 && bpt <= 255) {
5255 delta = v >= kbps ? v - kbps : kbps - v;
5256 if (delta < mindelta) {
5261 } else if (selected_cpt)
/* Two schedulers share one register; sched/2 picks the word and the
 * (elided) parity test picks the upper or lower 16-bit half. */
5267 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5268 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5269 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5271 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5273 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5274 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5279 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5280 * @adap: the adapter
5281 * @sched: the scheduler index
5282 * @ipg: the interpacket delay in tenths of nanoseconds
5284 * Set the interpacket delay for a HW packet rate scheduler.
5286 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
/* Two schedulers per register word; sched/2 selects the word. */
5288 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5290 /* convert ipg to nearest number of core clocks */
5291 ipg *= core_ticks_per_usec(adap);
/* Input is in tenths of ns: /10000 converts tenths-of-ns*ticks/us to
 * ticks, with +5000 for round-to-nearest. */
5292 ipg = (ipg + 5000) / 10000;
5293 if (ipg > M_TXTIMERSEPQ0)
/* Scheduler parity (elided test) picks the Q0 or Q1 field; the other
 * half of the register is preserved. */
5296 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5297 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5299 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5301 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5302 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
5303 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5308 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5309 * clocks. The formula is
5311 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5313 * which is equivalent to
5315 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5317 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5319 u64 v = bytes256 * adap->params.vpd.cclk;
/* 62.5 * v computed in integer arithmetic as 62*v + v/2. */
5321 return v * 62 + v / 2;
5325 * t4_get_chan_txrate - get the current per channel Tx rates
5326 * @adap: the adapter
5327 * @nic_rate: rates for NIC traffic
5328 * @ofld_rate: rates for offloaded traffic
5330 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5333 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* Tunnel (NIC) rates: channels 2/3 exist only on >2-channel chips. */
5337 v = t4_read_reg(adap, A_TP_TX_TRATE);
5338 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5339 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5340 if (adap->chip_params->nchan > 2) {
5341 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5342 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
/* Offload rates, same channel layout as above. */
5345 v = t4_read_reg(adap, A_TP_TX_ORATE);
5346 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5347 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5348 if (adap->chip_params->nchan > 2) {
5349 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5350 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5355 * t4_set_trace_filter - configure one of the tracing filters
5356 * @adap: the adapter
5357 * @tp: the desired trace filter parameters
5358 * @idx: which filter to configure
5359 * @enable: whether to enable or disable the filter
5361 * Configures one of the tracing filters available in HW. If @tp is %NULL
5362 * it indicates that the filter is already written in the register and it
5363 * just needs to be enabled or disabled.
5365 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5366 int idx, int enable)
5368 int i, ofst = idx * 4;
5369 u32 data_reg, mask_reg, cfg;
5370 u32 multitrc = F_TRCMULTIFILTER;
/* T4 and T5+ use different enable-bit positions in MATCH_CTL_A. */
5371 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5373 if (idx < 0 || idx >= NTRACE)
/* NULL @tp or disable request: just toggle the enable bit and return. */
5376 if (tp == NULL || !enable) {
5377 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5383 * TODO - After T4 data book is updated, specify the exact
5386 * See T4 data book - MPS section for a complete description
5387 * of the below if..else handling of A_MPS_TRC_CFG register
5390 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5391 if (cfg & F_TRCMULTIFILTER) {
5393 * If multiple tracers are enabled, then maximum
5394 * capture size is 2.5KB (FIFO size of a single channel)
5395 * minus 2 flits for CPL_TRACE_PKT header.
/* 10*1024/4 = 2560 bytes (2.5KB), minus 2 flits of 8 bytes each. */
5397 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5401 * If multiple tracers are disabled, to avoid deadlocks
5402 * maximum packet capture size of 9600 bytes is recommended.
5403 * Also in this mode, only trace0 can be enabled and running.
5406 if (tp->snap_len > 9600 || idx)
/* Validate all filter fields against their HW field widths. */
5410 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5411 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5412 tp->min_len > M_TFMINPKTSIZE)
5415 /* stop the tracer we'll be changing */
5416 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
/* Per-filter match/don't-care register banks are strided by filter. */
5418 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5419 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5420 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
/* HW stores don't-care bits, so the mask is written inverted. */
5422 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5423 t4_write_reg(adap, data_reg, tp->data[i]);
5424 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5426 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5427 V_TFCAPTUREMAX(tp->snap_len) |
5428 V_TFMINPKTSIZE(tp->min_len))
5429 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5430 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5432 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5433 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5439 * t4_get_trace_filter - query one of the tracing filters
5440 * @adap: the adapter
5441 * @tp: the current trace filter parameters
5442 * @idx: which trace filter to query
5443 * @enabled: non-zero if the filter is enabled
5445 * Returns the current settings of one of the HW tracing filters.
5447 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5451 int i, ofst = idx * 4;
5452 u32 data_reg, mask_reg;
5454 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5455 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
/* T4 vs T5+ use different field layouts for enable/port/invert. */
5458 *enabled = !!(ctla & F_TFEN);
5459 tp->port = G_TFPORT(ctla);
5460 tp->invert = !!(ctla & F_TFINVERTMATCH);
5462 *enabled = !!(ctla & F_T5_TFEN);
5463 tp->port = G_T5_TFPORT(ctla);
5464 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5466 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5467 tp->min_len = G_TFMINPKTSIZE(ctlb);
5468 tp->skip_ofst = G_TFOFFSET(ctla);
5469 tp->skip_len = G_TFLENGTH(ctla);
5471 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5472 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5473 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
/* HW stores don't-care bits; invert on read to recover the match mask. */
5475 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5476 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5477 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5482 * t4_pmtx_get_stats - returns the HW stats from PMTX
5483 * @adap: the adapter
5484 * @cnt: where to store the count statistics
5485 * @cycles: where to store the cycle statistics
5487 * Returns performance statistics from PMTX.
5489 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* STAT_CONFIG selects which statistic subsequent reads return. */
5494 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5495 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5496 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
/* Direct 64-bit read on one chip family; indirect 2x32-bit debug-
 * register read on the other (the is_t4 branch line is elided). */
5498 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5500 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5501 A_PM_TX_DBG_DATA, data, 2,
5502 A_PM_TX_DBG_STAT_MSB);
5503 cycles[i] = (((u64)data[0] << 32) | data[1]);
5509 * t4_pmrx_get_stats - returns the HW stats from PMRX
5510 * @adap: the adapter
5511 * @cnt: where to store the count statistics
5512 * @cycles: where to store the cycle statistics
5514 * Returns performance statistics from PMRX.
5516 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Mirrors t4_pmtx_get_stats but for the Rx path memory controller. */
5521 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5522 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5523 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
/* 64-bit direct read vs 2x32-bit indirect read depending on chip
 * (the selecting branch line is elided in this view). */
5525 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5527 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5528 A_PM_RX_DBG_DATA, data, 2,
5529 A_PM_RX_DBG_STAT_MSB);
5530 cycles[i] = (((u64)data[0] << 32) | data[1]);
5536 * t4_get_mps_bg_map - return the buffer groups associated with a port
5537 * @adap: the adapter
5538 * @idx: the port index
5540 * Returns a bitmap indicating which MPS buffer groups are associated
5541 * with the given port. Bit i is set if buffer group i is used by the
5544 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5546 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL))
/* 1-port config: port 0 owns all four buffer groups. */
5549 return idx == 0 ? 0xf : 0;
/* 2-port config on T4/T5: each port owns an adjacent pair of groups. */
5550 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5551 return idx < 2 ? (3 << (2 * idx)) : 0;
5556 * t4_get_port_type_description - return Port Type string description
5557 * @port_type: firmware Port Type enumeration
5559 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* NOTE(review): the table's string entries are elided in this view;
 * out-of-range types fall through to an (elided) default return. */
5561 static const char *const port_type_description[] = {
5580 if (port_type < ARRAY_SIZE(port_type_description))
5581 return port_type_description[port_type];
5586 * t4_get_port_stats_offset - collect port stats relative to a previous
5588 * @adap: The adapter
5590 * @stats: Current stats to fill
5591 * @offset: Previous stats snapshot
5593 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5594 struct port_stats *stats,
5595 struct port_stats *offset)
/*
 * Read absolute HW counters, then subtract the saved snapshot field
 * by field, treating both structs as flat u64 arrays (the subtraction
 * statement itself is elided in this view).
 */
5600 t4_get_port_stats(adap, idx, stats);
5601 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5602 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5608 * t4_get_port_stats - collect port statistics
5609 * @adap: the adapter
5610 * @idx: the port index
5611 * @p: the stats structure to fill
5613 * Collect statistics related to the given port from HW.
5615 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5617 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Per-port stat registers live at different bases on T4 vs T5+. */
5620 #define GET_STAT(name) \
5621 t4_read_reg64(adap, \
5622 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5623 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5624 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5626 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5628 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5629 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5630 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5631 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5632 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5633 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5634 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5635 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5636 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5637 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5638 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5639 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5640 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5641 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5642 p->tx_drop = GET_STAT(TX_PORT_DROP);
5643 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5644 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5645 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5646 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5647 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5648 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5649 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5650 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/*
 * When HW counts pause frames in the regular Tx stats, back them out
 * so frame/octet counters reflect data traffic only.  Pause frames
 * are fixed 64-byte multicast frames, hence the *64 and mcast adjust.
 */
5652 if (stat_ctl & F_COUNTPAUSESTATTX) {
5653 p->tx_frames -= p->tx_pause;
5654 p->tx_octets -= p->tx_pause * 64;
5655 p->tx_mcast_frames -= p->tx_pause;
5658 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5659 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5660 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5661 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5662 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5663 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5664 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5665 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5666 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5667 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5668 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5669 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5670 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5671 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5672 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5673 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5674 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5675 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5676 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5677 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5678 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5679 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5680 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5681 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5682 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5683 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5684 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame back-out on the Rx side. */
5686 if (stat_ctl & F_COUNTPAUSESTATRX) {
5687 p->rx_frames -= p->rx_pause;
5688 p->rx_octets -= p->rx_pause * 64;
5689 p->rx_mcast_frames -= p->rx_pause;
/* Buffer-group drop/truncate counters, gated by this port's bg map. */
5692 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5693 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5694 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5695 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5696 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5697 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5698 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5699 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5706 * t4_get_lb_stats - collect loopback port statistics
5707 * @adap: the adapter
5708 * @idx: the loopback port index
5709 * @p: the stats structure to fill
5711 * Return HW statistics for the given loopback port.
5713 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5715 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Loopback stat registers differ in base between T4 and T5+. */
5717 #define GET_STAT(name) \
5718 t4_read_reg64(adap, \
5720 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5721 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5722 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5724 p->octets = GET_STAT(BYTES);
5725 p->frames = GET_STAT(FRAMES);
5726 p->bcast_frames = GET_STAT(BCAST);
5727 p->mcast_frames = GET_STAT(MCAST);
5728 p->ucast_frames = GET_STAT(UCAST);
5729 p->error_frames = GET_STAT(ERROR);
5731 p->frames_64 = GET_STAT(64B);
5732 p->frames_65_127 = GET_STAT(65B_127B);
5733 p->frames_128_255 = GET_STAT(128B_255B);
5734 p->frames_256_511 = GET_STAT(256B_511B);
5735 p->frames_512_1023 = GET_STAT(512B_1023B);
5736 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5737 p->frames_1519_max = GET_STAT(1519B_MAX);
5738 p->drop = GET_STAT(DROP_FRAMES);
/* Buffer-group loopback drop/truncate stats, gated by the bg map. */
5740 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5741 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5742 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5743 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5744 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5745 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5746 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5747 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5754 * t4_wol_magic_enable - enable/disable magic packet WoL
5755 * @adap: the adapter
5756 * @port: the physical port index
5757 * @addr: MAC address expected in magic packets, %NULL to disable
5759 * Enables/disables magic packet wake-on-LAN for the selected port.
5761 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5764 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
/* T4 vs T5+ place the per-port MAC config block at different bases. */
5767 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5768 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5769 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5771 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5772 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5773 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
/* Pack the 6-byte MAC address: bytes 2-5 in the LO register,
 * bytes 0-1 in the HI register. */
5777 t4_write_reg(adap, mag_id_reg_l,
5778 (addr[2] << 24) | (addr[3] << 16) |
5779 (addr[4] << 8) | addr[5]);
5780 t4_write_reg(adap, mag_id_reg_h,
5781 (addr[0] << 8) | addr[1]);
/* Enable iff a MAC address was supplied; NULL disables. */
5783 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5784 V_MAGICEN(addr != NULL));
5788 * t4_wol_pat_enable - enable/disable pattern-based WoL
5789 * @adap: the adapter
5790 * @port: the physical port index
5791 * @map: bitmap of which HW pattern filters to set
5792 * @mask0: byte mask for bytes 0-63 of a packet
5793 * @mask1: byte mask for bytes 64-127 of a packet
5794 * @crc: Ethernet CRC for selected bytes
5795 * @enable: enable/disable switch
5797 * Sets the pattern filters indicated in @map to mask out the bytes
5798 * specified in @mask0/@mask1 in received packets and compare the CRC of
5799 * the resulting packet against @crc. If @enable is %true pattern-based
5800 * WoL is enabled, otherwise disabled.
5802 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
5803 u64 mask0, u64 mask1, unsigned int crc, bool enable)
5809 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5811 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
/* Disable request: clear the pattern-enable bit and return early. */
5814 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
5820 #define EPIO_REG(name) \
5821 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
5822 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
/* Stage the upper mask words once; DATA0 is rewritten per filter. */
5824 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
5825 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
5826 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
/* Program each filter selected in @map (skip-if-clear is elided). */
5828 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
5832 /* write byte masks */
5833 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
5834 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
5835 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
/* BUSY after the flush means the EPIO write failed (error return
 * elided in this view). */
5836 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
/* CRC values live at EPIO addresses offset by 32 from the masks. */
5840 t4_write_reg(adap, EPIO_REG(DATA0), crc);
5841 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
5842 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
5843 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
5848 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
5852 /* t4_mk_filtdelwr - create a delete filter WR
5853 * @ftid: the filter ID
5854 * @wr: the filter work request to populate
5855 * @qid: ingress queue to receive the delete notification
5857 * Creates a filter work request to delete the supplied filter. If @qid is
5858 * negative the delete notification is suppressed.
5860 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
5862 memset(wr, 0, sizeof(*wr));
/* All multi-byte WR fields are big-endian on the wire. */
5863 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
5864 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
/* NOREPLY set when qid < 0 suppresses the delete notification. */
5865 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
5866 V_FW_FILTER_WR_NOREPLY(qid < 0));
5867 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
/* Reply queue is only set when a notification was requested
 * (the guarding qid >= 0 test is elided in this view). */
5869 wr->rx_chan_rx_rpl_iq =
5870 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * INIT_CMD - fill the common header of a firmware command struct.
 * @var: the command structure (by value, fields assigned in place)
 * @cmd: command name fragment, expands to FW_<cmd>_CMD
 * @rd_wr: READ or WRITE, expands to the request-direction flag
 * Multi-statement macro wrapped in do { } while (0) (tail elided here).
 */
5873 #define INIT_CMD(var, cmd, rd_wr) do { \
5874 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
5875 F_FW_CMD_REQUEST | \
5876 F_FW_CMD_##rd_wr); \
5877 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
/*
 * t4_fwaddrspace_write - write a value to firmware address space
 * Issues an LDST command through @mbox targeting the FIRMWARE address
 * space; @addr/@val name the location and value (parameter lines are
 * elided in this view).  Returns the mailbox command status.
 */
5880 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
5884 struct fw_ldst_cmd c;
5886 memset(&c, 0, sizeof(c));
5887 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
5888 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5892 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5893 c.u.addrval.addr = cpu_to_be32(addr);
5894 c.u.addrval.val = cpu_to_be32(val);
/* No reply buffer: fire-and-check-status only. */
5896 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5900 * t4_mdio_rd - read a PHY register through MDIO
5901 * @adap: the adapter
5902 * @mbox: mailbox to use for the FW command
5903 * @phy_addr: the PHY address
5904 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5905 * @reg: the register to read
5906 * @valp: where to store the value
5908 * Issues a FW command through the given mailbox to read a PHY register.
5910 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5911 unsigned int mmd, unsigned int reg, unsigned int *valp)
5915 struct fw_ldst_cmd c;
5917 memset(&c, 0, sizeof(c));
5918 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
5919 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5920 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5922 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5923 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5924 V_FW_LDST_CMD_MMD(mmd));
5925 c.u.mdio.raddr = cpu_to_be16(reg);
/* The command struct doubles as the reply buffer; on success (test
 * elided) the 16-bit PHY register value is returned in mdio.rval. */
5927 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5929 *valp = be16_to_cpu(c.u.mdio.rval);
5934 * t4_mdio_wr - write a PHY register through MDIO
5935 * @adap: the adapter
5936 * @mbox: mailbox to use for the FW command
5937 * @phy_addr: the PHY address
5938 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5939 * @reg: the register to write
5940 * @valp: value to write
5942 * Issues a FW command through the given mailbox to write a PHY register.
5944 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5945 unsigned int mmd, unsigned int reg, unsigned int val)
5948 struct fw_ldst_cmd c;
5950 memset(&c, 0, sizeof(c));
5951 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
/* Same command shape as t4_mdio_rd but with the WRITE flag and the
 * register value carried in mdio.rval. */
5952 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5953 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5955 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5956 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
5957 V_FW_LDST_CMD_MMD(mmd));
5958 c.u.mdio.raddr = cpu_to_be16(reg);
5959 c.u.mdio.rval = cpu_to_be16(val);
5961 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5966 * t4_sge_decode_idma_state - decode the idma state
5967 * @adap: the adapter
5968 * @state: the state idma is stuck in
5970 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* Per-chip lookup tables mapping an IDMA state number to its name.
 * NOTE(review): some table entries are elided in this excerpt; the
 * index/name alignment cannot be fully verified from this view. */
5972 static const char * const t4_decode[] = {
5974 "IDMA_PUSH_MORE_CPL_FIFO",
5975 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5977 "IDMA_PHYSADDR_SEND_PCIEHDR",
5978 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5979 "IDMA_PHYSADDR_SEND_PAYLOAD",
5980 "IDMA_SEND_FIFO_TO_IMSG",
5981 "IDMA_FL_REQ_DATA_FL_PREP",
5982 "IDMA_FL_REQ_DATA_FL",
5984 "IDMA_FL_H_REQ_HEADER_FL",
5985 "IDMA_FL_H_SEND_PCIEHDR",
5986 "IDMA_FL_H_PUSH_CPL_FIFO",
5987 "IDMA_FL_H_SEND_CPL",
5988 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5989 "IDMA_FL_H_SEND_IP_HDR",
5990 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5991 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5992 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5993 "IDMA_FL_D_SEND_PCIEHDR",
5994 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5995 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5996 "IDMA_FL_SEND_PCIEHDR",
5997 "IDMA_FL_PUSH_CPL_FIFO",
5999 "IDMA_FL_SEND_PAYLOAD_FIRST",
6000 "IDMA_FL_SEND_PAYLOAD",
6001 "IDMA_FL_REQ_NEXT_DATA_FL",
6002 "IDMA_FL_SEND_NEXT_PCIEHDR",
6003 "IDMA_FL_SEND_PADDING",
6004 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6005 "IDMA_FL_SEND_FIFO_TO_IMSG",
6006 "IDMA_FL_REQ_DATAFL_DONE",
6007 "IDMA_FL_REQ_HEADERFL_DONE",
6009 static const char * const t5_decode[] = {
6012 "IDMA_PUSH_MORE_CPL_FIFO",
6013 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6014 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6015 "IDMA_PHYSADDR_SEND_PCIEHDR",
6016 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6017 "IDMA_PHYSADDR_SEND_PAYLOAD",
6018 "IDMA_SEND_FIFO_TO_IMSG",
6019 "IDMA_FL_REQ_DATA_FL",
6021 "IDMA_FL_DROP_SEND_INC",
6022 "IDMA_FL_H_REQ_HEADER_FL",
6023 "IDMA_FL_H_SEND_PCIEHDR",
6024 "IDMA_FL_H_PUSH_CPL_FIFO",
6025 "IDMA_FL_H_SEND_CPL",
6026 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6027 "IDMA_FL_H_SEND_IP_HDR",
6028 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6029 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6030 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6031 "IDMA_FL_D_SEND_PCIEHDR",
6032 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6033 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6034 "IDMA_FL_SEND_PCIEHDR",
6035 "IDMA_FL_PUSH_CPL_FIFO",
6037 "IDMA_FL_SEND_PAYLOAD_FIRST",
6038 "IDMA_FL_SEND_PAYLOAD",
6039 "IDMA_FL_REQ_NEXT_DATA_FL",
6040 "IDMA_FL_SEND_NEXT_PCIEHDR",
6041 "IDMA_FL_SEND_PADDING",
6042 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6044 static const char * const t6_decode[] = {
6046 "IDMA_PUSH_MORE_CPL_FIFO",
6047 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6048 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6049 "IDMA_PHYSADDR_SEND_PCIEHDR",
6050 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6051 "IDMA_PHYSADDR_SEND_PAYLOAD",
6052 "IDMA_FL_REQ_DATA_FL",
6054 "IDMA_FL_DROP_SEND_INC",
6055 "IDMA_FL_H_REQ_HEADER_FL",
6056 "IDMA_FL_H_SEND_PCIEHDR",
6057 "IDMA_FL_H_PUSH_CPL_FIFO",
6058 "IDMA_FL_H_SEND_CPL",
6059 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6060 "IDMA_FL_H_SEND_IP_HDR",
6061 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6062 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6063 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6064 "IDMA_FL_D_SEND_PCIEHDR",
6065 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6066 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6067 "IDMA_FL_SEND_PCIEHDR",
6068 "IDMA_FL_PUSH_CPL_FIFO",
6070 "IDMA_FL_SEND_PAYLOAD_FIRST",
6071 "IDMA_FL_SEND_PAYLOAD",
6072 "IDMA_FL_REQ_NEXT_DATA_FL",
6073 "IDMA_FL_SEND_NEXT_PCIEHDR",
6074 "IDMA_FL_SEND_PADDING",
6075 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers dumped alongside the decoded state. */
6077 static const u32 sge_regs[] = {
6078 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6079 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6080 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6082 const char * const *sge_idma_decode;
6083 int sge_idma_decode_nstates;
6085 unsigned int chip_version = chip_id(adapter);
6087 /* Select the right set of decode strings to dump depending on the
6088 * adapter chip type.
6090 switch (chip_version) {
6092 sge_idma_decode = (const char * const *)t4_decode;
6093 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6097 sge_idma_decode = (const char * const *)t5_decode;
6098 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6102 sge_idma_decode = (const char * const *)t6_decode;
6103 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6107 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
/* Log the symbolic name if in range, else the raw state number. */
6111 if (state < sge_idma_decode_nstates)
6112 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6114 CH_WARN(adapter, "idma state %d unknown\n", state);
6116 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6117 CH_WARN(adapter, "SGE register %#x value %#x\n",
6118 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6122 * t4_sge_ctxt_flush - flush the SGE context cache
6123 * @adap: the adapter
6124 * @mbox: mailbox to use for the FW command
6126 * Issues a FW command through the given mailbox to flush the
6127 * SGE context cache.
6129 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6133 struct fw_ldst_cmd c;
6135 memset(&c, 0, sizeof(c));
/* LDST to the SGE egress-context address space with CTXTFLUSH set
 * asks firmware to flush the cache; issued as a READ-flagged command. */
6136 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6137 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6138 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6140 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6141 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6143 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6148 * t4_fw_hello - establish communication with FW
6149 * @adap: the adapter
6150 * @mbox: mailbox to use for the FW command
6151 * @evt_mbox: mailbox to receive async FW events
6152 * @master: specifies the caller's willingness to be the device master
6153 * @state: returns the current device state (if non-NULL)
6155 * Issues a command to establish communication with FW. Returns either
6156 * an error (negative integer) or the mailbox of the Master PF.
6158 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6159 enum dev_master master, enum dev_state *state)
6162 struct fw_hello_cmd c;
6164 unsigned int master_mbox;
6165 int retries = FW_CMD_HELLO_RETRIES;
/* Retry loop re-enters here on busy/timeout (label line elided). */
6168 memset(&c, 0, sizeof(c));
6169 INIT_CMD(c, HELLO, WRITE);
/* MBMASTER: our own mailbox when we must be master, otherwise the
 * all-ones sentinel meaning "no preference". */
6170 c.err_to_clearinit = cpu_to_be32(
6171 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6172 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6173 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6174 mbox : M_FW_HELLO_CMD_MBMASTER) |
6175 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6176 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6177 F_FW_HELLO_CMD_CLEARINIT);
6180 * Issue the HELLO command to the firmware. If it's not successful
6181 * but indicates that we got a "busy" or "timeout" condition, retry
6182 * the HELLO until we exhaust our retry limit. If we do exceed our
6183 * retry limit, check to see if the firmware left us any error
6184 * information and report that if so ...
6186 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6187 if (ret != FW_SUCCESS) {
6188 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6190 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6191 t4_report_fw_error(adap);
6195 v = be32_to_cpu(c.err_to_clearinit);
6196 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
/* Report the device state back to the caller when requested. */
6198 if (v & F_FW_HELLO_CMD_ERR)
6199 *state = DEV_STATE_ERR;
6200 else if (v & F_FW_HELLO_CMD_INIT)
6201 *state = DEV_STATE_INIT;
6203 *state = DEV_STATE_UNINIT;
6207 * If we're not the Master PF then we need to wait around for the
6208 * Master PF Driver to finish setting up the adapter.
6210 * Note that we also do this wait if we're a non-Master-capable PF and
6211 * there is no current Master PF; a Master PF may show up momentarily
6212 * and we wouldn't want to fail pointlessly. (This can happen when an
6213 * OS loads lots of different drivers rapidly at the same time). In
6214 * this case, the Master PF returned by the firmware will be
6215 * M_PCIE_FW_MASTER so the test below will work ...
6217 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6218 master_mbox != mbox) {
6219 int waiting = FW_CMD_HELLO_TIMEOUT;
6222 * Wait for the firmware to either indicate an error or
6223 * initialized state. If we see either of these we bail out
6224 * and report the issue to the caller. If we exhaust the
6225 * "hello timeout" and we haven't exhausted our retries, try
6226 * again. Otherwise bail with a timeout error.
6235 * If neither Error nor Initialialized are indicated
6236 * by the firmware keep waiting till we exhaust our
6237 * timeout ... and then retry if we haven't exhausted
6240 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6241 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6252 * We either have an Error or Initialized condition
6253 * report errors preferentially.
6256 if (pcie_fw & F_PCIE_FW_ERR)
6257 *state = DEV_STATE_ERR;
6258 else if (pcie_fw & F_PCIE_FW_INIT)
6259 *state = DEV_STATE_INIT;
6263 * If we arrived before a Master PF was selected and
6264 * there's not a valid Master PF, grab its identity
6267 if (master_mbox == M_PCIE_FW_MASTER &&
6268 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6269 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6278 * t4_fw_bye - end communication with FW
6279 * @adap: the adapter
6280 * @mbox: mailbox to use for the FW command
6282 * Issues a command to terminate communication with FW.
6284 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6286 struct fw_bye_cmd c;
6288 memset(&c, 0, sizeof(c));
6289 INIT_CMD(c, BYE, WRITE);
6290 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6294 * t4_fw_reset - issue a reset to FW
6295 * @adap: the adapter
6296 * @mbox: mailbox to use for the FW command
6297 * @reset: specifies the type of reset to perform
6299 * Issues a reset command of the specified type to FW.
6301 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6303 struct fw_reset_cmd c;
6305 memset(&c, 0, sizeof(c));
6306 INIT_CMD(c, RESET, WRITE);
6307 c.val = cpu_to_be32(reset);
6308 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6312 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6313 * @adap: the adapter
6314 * @mbox: mailbox to use for the FW RESET command (if desired)
6315 * @force: force uP into RESET even if FW RESET command fails
6317 * Issues a RESET command to firmware (if desired) with a HALT indication
6318 * and then puts the microprocessor into RESET state. The RESET command
6319 * will only be issued if a legitimate mailbox is provided (mbox <=
6320 * M_PCIE_FW_MASTER).
6322 * This is generally used in order for the host to safely manipulate the
6323 * adapter without fear of conflicting with whatever the firmware might
6324 * be doing. The only way out of this state is to RESTART the firmware
/*
 * NOTE(review): halts the firmware: optionally sends a RESET command with
 * the HALT flag (only when @mbox is a legitimate mailbox, i.e. within
 * M_PCIE_FW_MASTER), then forces the uP into RESET via CIM_BOOT_CFG.UPCRST
 * and latches PCIE_FW.HALT so the next firmware start knows it is coming
 * out of a HALT rather than a full RESET.  Returns the firmware RESET
 * command result even when @force made us proceed past a failure.
 */
6327 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
/* Only talk to the firmware if the caller supplied a real mailbox. */
6332 * If a legitimate mailbox is provided, issue a RESET command
6333 * with a HALT indication.
6335 if (mbox <= M_PCIE_FW_MASTER) {
6336 struct fw_reset_cmd c;
6338 memset(&c, 0, sizeof(c));
6339 INIT_CMD(c, RESET, WRITE);
/* PIORST | PIORSTMODE: request a PIO-driven chip reset. */
6340 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6341 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6342 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6346 * Normally we won't complete the operation if the firmware RESET
6347 * command fails but if our caller insists we'll go ahead and put the
6348 * uP into RESET. This can be useful if the firmware is hung or even
6349 * missing ... We'll have to take the risk of putting the uP into
6350 * RESET without the cooperation of firmware in that case.
6352 * We also force the firmware's HALT flag to be on in case we bypassed
6353 * the firmware RESET command above or we're dealing with old firmware
6354 * which doesn't have the HALT capability. This will serve as a flag
6355 * for the incoming firmware to know that it's coming out of a HALT
6356 * rather than a RESET ... if it's new enough to understand that ...
6358 if (ret == 0 || force) {
/* Put the uP into RESET and latch PCIE_FW.HALT for the next start. */
6359 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6360 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6365 * And we always return the result of the firmware RESET command
6366 * even when we force the uP into RESET ...
6372 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6373 * @adap: the adapter
6374 * @reset: if we want to do a RESET to restart things
6376 * Restart firmware previously halted by t4_fw_halt(). On successful
6377 * return the previous PF Master remains as the new PF Master and there
6378 * is no need to issue a new HELLO command, etc.
6380 * We do this in two ways:
6382 * 1. If we're dealing with newer firmware we'll simply want to take
6383 * the chip's microprocessor out of RESET. This will cause the
6384 * firmware to start up from its start vector. And then we'll loop
6385 * until the firmware indicates it's started again (PCIE_FW.HALT
6386 * reset to 0) or we timeout.
6388 * 2. If we're dealing with older firmware then we'll need to RESET
6389 * the chip since older firmware won't recognize the PCIE_FW.HALT
6390 * flag and automatically RESET itself on startup.
/*
 * NOTE(review): restarts firmware previously halted by t4_fw_halt().
 * Clears PCIE_FW.HALT first (since the host, not firmware, drives this
 * RESET), then either asks firmware to RESET via the mailbox or hits
 * PL_RST directly, and finally takes the uP out of RESET and polls
 * PCIE_FW.HALT until it drops or FW_CMD_MAX_TIMEOUT expires.
 */
6392 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6396 * Since we're directing the RESET instead of the firmware
6397 * doing it automatically, we need to clear the PCIE_FW.HALT
6400 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6403 * If we've been given a valid mailbox, first try to get the
6404 * firmware to do the RESET. If that works, great and we can
6405 * return success. Otherwise, if we haven't been given a
6406 * valid mailbox or the RESET command failed, fall back to
6407 * hitting the chip with a hammer.
6409 if (mbox <= M_PCIE_FW_MASTER) {
/* uP must be out of RESET before it can process the mailbox command. */
6410 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6412 if (t4_fw_reset(adap, mbox,
6413 F_PIORST | F_PIORSTMODE) == 0)
/* Fallback: direct chip reset through PL_RST. */
6417 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6422 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
/* Poll until firmware clears PCIE_FW.HALT to signal it is running. */
6423 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6424 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6435 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6436 * @adap: the adapter
6437 * @mbox: mailbox to use for the FW RESET command (if desired)
6438 * @fw_data: the firmware image to write
6440 * @force: force upgrade even if firmware doesn't cooperate
6442 * Perform all of the steps necessary for upgrading an adapter's
6443 * firmware image. Normally this requires the cooperation of the
6444 * existing firmware in order to halt all existing activities
6445 * but if an invalid mailbox token is passed in we skip that step
6446 * (though we'll still put the adapter microprocessor into RESET in
6449 * On successful return the new firmware will have been loaded and
6450 * the adapter will have been fully RESET losing all previous setup
6451 * state. On unsuccessful return the adapter may be completely hosed ...
6452 * positive errno indicates that the adapter is ~probably~ intact, a
6453 * negative errno indicates that things are looking bad ...
/*
 * NOTE(review): full firmware-upgrade sequence: verify the image matches
 * the chip, HALT the running firmware (t4_fw_halt honoring @force), load
 * the new image, and RESTART.  Bootstrap images (FW_HDR_MAGIC_BOOTSTRAP)
 * skip the restart.  Elided lines presumably carry the early returns for
 * the failure paths — TODO confirm against the unelided file.
 */
6455 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6456 const u8 *fw_data, unsigned int size, int force)
6458 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Bootstrap images are loaded but never restarted below. */
6459 unsigned int bootstrap =
6460 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6463 if (!t4_fw_matches_chip(adap, fw_hdr))
6467 ret = t4_fw_halt(adap, mbox, force)
6468 if (ret < 0 && !force)
6472 ret = t4_load_fw(adap, fw_data, size);
6473 if (ret < 0 || bootstrap)
6477 * Older versions of the firmware don't understand the new
6478 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6479 * restart. So for newly loaded older firmware we'll have to do the
6480 * RESET for it so it starts up on a clean slate. We can tell if
6481 * the newly loaded firmware will handle this right by checking
6482 * its header flags to see if it advertises the capability.
6484 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6485 return t4_fw_restart(adap, mbox, reset);
6489 * t4_fw_initialize - ask FW to initialize the device
6490 * @adap: the adapter
6491 * @mbox: mailbox to use for the FW command
6493 * Issues a command to FW to partially initialize the device. This
6494 * performs initialization that generally doesn't depend on user input.
6496 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6498 struct fw_initialize_cmd c;
6500 memset(&c, 0, sizeof(c));
6501 INIT_CMD(c, INITIALIZE, WRITE);
6502 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6506 * t4_query_params_rw - query FW or device parameters
6507 * @adap: the adapter
6508 * @mbox: mailbox to use for the FW command
6511 * @nparams: the number of parameters
6512 * @params: the parameter names
6513 * @val: the parameter values
6514 * @rw: Write and read flag
6516 * Reads the value of FW or device parameters. Up to 7 parameters can be
/*
 * NOTE(review): reads up to 7 FW/device parameters in one FW_PARAMS_CMD.
 * Each parameter occupies a (mnem, val) pair of big-endian words; on a
 * successful mailbox exchange the replies are copied back into @val.
 * The elided lines presumably hold the nparams bound check and the @rw
 * conditional around pre-loading @val — TODO confirm.
 */
6519 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6520 unsigned int vf, unsigned int nparams, const u32 *params,
6524 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command payload. */
6525 __be32 *p = &c.param[0].mnem;
6530 memset(&c, 0, sizeof(c));
6531 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6532 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6533 V_FW_PARAMS_CMD_PFN(pf) |
6534 V_FW_PARAMS_CMD_VFN(vf));
6535 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6537 for (i = 0; i < nparams; i++) {
6538 *p++ = cpu_to_be32(*params++);
6540 *p = cpu_to_be32(*(val + i));
6544 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Harvest the returned values; reply pairs are 2 words apart. */
6546 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6547 *val++ = be32_to_cpu(*p);
6551 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6552 unsigned int vf, unsigned int nparams, const u32 *params,
6555 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6559 * t4_set_params_timeout - sets FW or device parameters
6560 * @adap: the adapter
6561 * @mbox: mailbox to use for the FW command
6564 * @nparams: the number of parameters
6565 * @params: the parameter names
6566 * @val: the parameter values
6567 * @timeout: the timeout time
6569 * Sets the value of FW or device parameters. Up to 7 parameters can be
6570 * specified at once.
/*
 * NOTE(review): writes up to 7 FW/device parameters in one FW_PARAMS_CMD
 * with an explicit mailbox @timeout.  Each parameter is a (mnem, val)
 * big-endian pair.  The elided lines presumably hold the nparams bound
 * check and the loop header around the pair stores — TODO confirm.
 */
6572 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6573 unsigned int pf, unsigned int vf,
6574 unsigned int nparams, const u32 *params,
6575 const u32 *val, int timeout)
6577 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command payload. */
6578 __be32 *p = &c.param[0].mnem;
6583 memset(&c, 0, sizeof(c));
6584 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6585 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6586 V_FW_PARAMS_CMD_PFN(pf) |
6587 V_FW_PARAMS_CMD_VFN(vf));
6588 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6591 *p++ = cpu_to_be32(*params++);
6592 *p++ = cpu_to_be32(*val++);
6595 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6599 * t4_set_params - sets FW or device parameters
6600 * @adap: the adapter
6601 * @mbox: mailbox to use for the FW command
6604 * @nparams: the number of parameters
6605 * @params: the parameter names
6606 * @val: the parameter values
6608 * Sets the value of FW or device parameters. Up to 7 parameters can be
6609 * specified at once.
6611 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6612 unsigned int vf, unsigned int nparams, const u32 *params,
6615 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6616 FW_CMD_MAX_TIMEOUT);
6620 * t4_cfg_pfvf - configure PF/VF resource limits
6621 * @adap: the adapter
6622 * @mbox: mailbox to use for the FW command
6623 * @pf: the PF being configured
6624 * @vf: the VF being configured
6625 * @txq: the max number of egress queues
6626 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6627 * @rxqi: the max number of interrupt-capable ingress queues
6628 * @rxq: the max number of interruptless ingress queues
6629 * @tc: the PCI traffic class
6630 * @vi: the max number of virtual interfaces
6631 * @cmask: the channel access rights mask for the PF/VF
6632 * @pmask: the port access rights mask for the PF/VF
6633 * @nexact: the maximum number of exact MPS filters
6634 * @rcaps: read capabilities
6635 * @wxcaps: write/execute capabilities
6637 * Configures resource limits and capabilities for a physical or virtual
6640 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6641 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6642 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6643 unsigned int vi, unsigned int cmask, unsigned int pmask,
6644 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6646 struct fw_pfvf_cmd c;
6648 memset(&c, 0, sizeof(c));
6649 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6650 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6651 V_FW_PFVF_CMD_VFN(vf));
6652 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6653 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6654 V_FW_PFVF_CMD_NIQ(rxq));
6655 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6656 V_FW_PFVF_CMD_PMASK(pmask) |
6657 V_FW_PFVF_CMD_NEQ(txq));
6658 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6659 V_FW_PFVF_CMD_NVI(vi) |
6660 V_FW_PFVF_CMD_NEXACTF(nexact));
6661 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6662 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6663 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6664 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6668 * t4_alloc_vi_func - allocate a virtual interface
6669 * @adap: the adapter
6670 * @mbox: mailbox to use for the FW command
6671 * @port: physical port associated with the VI
6672 * @pf: the PF owning the VI
6673 * @vf: the VF owning the VI
6674 * @nmac: number of MAC addresses needed (1 to 5)
6675 * @mac: the MAC addresses of the VI
6676 * @rss_size: size of RSS table slice associated with this VI
6677 * @portfunc: which Port Application Function MAC Address is desired
6678 * @idstype: Intrusion Detection Type
6680 * Allocates a virtual interface for the given physical port. If @mac is
6681 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6682 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6683 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6684 * stored consecutively so the space needed is @nmac * 6 bytes.
6685 * Returns a negative error number or the non-negative VI id.
/*
 * NOTE(review): allocates a VI on @port via FW_VI_CMD/ALLOC.  On success
 * copies the firmware-assigned MAC address(es) into @mac, stores the RSS
 * slice size in @rss_size (unless NORSS was reported), and returns the
 * VI id.  The descending memcpy chain below is presumably the arms of a
 * fallthrough switch on @nmac (copy nmac3..nmac0 then the base MAC) —
 * TODO confirm against the unelided file.
 */
6687 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6688 unsigned int port, unsigned int pf, unsigned int vf,
6689 unsigned int nmac, u8 *mac, u16 *rss_size,
6690 unsigned int portfunc, unsigned int idstype)
6695 memset(&c, 0, sizeof(c));
6696 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6697 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6698 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6699 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6700 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6701 V_FW_VI_CMD_FUNC(portfunc));
6702 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6705 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6707 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6712 memcpy(mac, c.mac, sizeof(c.mac));
/* Each extra MAC is 6 bytes; offsets are nmac index * ETHER_ADDR_LEN. */
6715 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6717 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6719 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6721 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6725 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
/* Return value is the non-negative VI id extracted from the reply. */
6726 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6730 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6731 * @adap: the adapter
6732 * @mbox: mailbox to use for the FW command
6733 * @port: physical port associated with the VI
6734 * @pf: the PF owning the VI
6735 * @vf: the VF owning the VI
6736 * @nmac: number of MAC addresses needed (1 to 5)
6737 * @mac: the MAC addresses of the VI
6738 * @rss_size: size of RSS table slice associated with this VI
6740 * backwards compatible and convieniance routine to allocate a Virtual
6741 * Interface with a Ethernet Port Application Function and Intrustion
6742 * Detection System disabled.
6744 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6745 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6748 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6753 * t4_free_vi - free a virtual interface
6754 * @adap: the adapter
6755 * @mbox: mailbox to use for the FW command
6756 * @pf: the PF owning the VI
6757 * @vf: the VF owning the VI
6758 * @viid: virtual interface identifiler
6760 * Free a previously allocated virtual interface.
6762 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6763 unsigned int vf, unsigned int viid)
6767 memset(&c, 0, sizeof(c));
6768 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6771 V_FW_VI_CMD_PFN(pf) |
6772 V_FW_VI_CMD_VFN(vf));
6773 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6774 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6776 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6780 * t4_set_rxmode - set Rx properties of a virtual interface
6781 * @adap: the adapter
6782 * @mbox: mailbox to use for the FW command
6784 * @mtu: the new MTU or -1
6785 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6786 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6787 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
6788 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
6789 * @sleep_ok: if true we may sleep while awaiting command completion
6791 * Sets Rx properties of a virtual interface.
/*
 * NOTE(review): programs VI Rx properties via FW_VI_RXMODE_CMD.  Each
 * "-1 means no change" argument is converted to the field's all-ones
 * mask value, which the firmware interprets as "leave unchanged".  The
 * elided lines are presumably the `if (arg < 0)` guards for each
 * conversion — TODO confirm against the unelided file.
 */
6793 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
6794 int mtu, int promisc, int all_multi, int bcast, int vlanex,
6797 struct fw_vi_rxmode_cmd c;
6799 /* convert to FW values */
6801 mtu = M_FW_VI_RXMODE_CMD_MTU;
6803 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
6805 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
6807 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
6809 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
6811 memset(&c, 0, sizeof(c));
6812 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
6813 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6814 V_FW_VI_RXMODE_CMD_VIID(viid));
6815 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6817 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
6818 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
6819 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
6820 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
6821 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* sleep_ok lets the mailbox write sleep instead of busy-waiting. */
6822 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6826 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
6827 * @adap: the adapter
6828 * @mbox: mailbox to use for the FW command
6830 * @free: if true any existing filters for this VI id are first removed
6831 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
6832 * @addr: the MAC address(es)
6833 * @idx: where to store the index of each allocated filter
6834 * @hash: pointer to hash address filter bitmap
6835 * @sleep_ok: call is allowed to sleep
6837 * Allocates an exact-match filter for each of the supplied addresses and
6838 * sets it to the corresponding address. If @idx is not %NULL it should
6839 * have at least @naddr entries, each of which will be set to the index of
6840 * the filter allocated for the corresponding MAC address. If a filter
6841 * could not be allocated for an address its index is set to 0xffff.
6842 * If @hash is not %NULL addresses that fail to allocate an exact filter
6843 * are hashed and update the hash filter bitmap pointed at by @hash.
6845 * Returns a negative error number or the number of filters allocated.
/*
 * NOTE(review): allocates exact-match MPS TCAM filters for up to @naddr
 * MAC addresses, batching them into FW_VI_MAC_CMD chunks of at most
 * ARRAY_SIZE(c.u.exact) entries.  Addresses that don't fit in the TCAM
 * (index >= mps_tcam_size) are folded into the inexact hash bitmap
 * (@hash) instead.  -FW_ENOMEM from the mailbox is tolerated because
 * partial results must still be harvested from the reply.
 */
6847 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
6848 unsigned int viid, bool free, unsigned int naddr,
6849 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
6851 int offset, ret = 0;
6852 struct fw_vi_mac_cmd c;
6853 unsigned int nfilters = 0;
/* Hardware limit on exact-match entries for this chip. */
6854 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
6855 unsigned int rem = naddr;
6857 if (naddr > max_naddr)
/* Process the address list one firmware-command-sized chunk at a time. */
6860 for (offset = 0; offset < naddr ; /**/) {
6861 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
6863 : ARRAY_SIZE(c.u.exact));
/* Command length covers only the exact[] entries actually used. */
6864 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6865 u.exact[fw_naddr]), 16);
6866 struct fw_vi_mac_exact *p;
6869 memset(&c, 0, sizeof(c));
6870 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6873 V_FW_CMD_EXEC(free) |
6874 V_FW_VI_MAC_CMD_VIID(viid));
6875 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
6876 V_FW_CMD_LEN16(len16));
6878 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6880 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
/* FW_VI_MAC_ADD_MAC asks firmware to pick a free TCAM slot. */
6881 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
6882 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
6886 * It's okay if we run out of space in our MAC address arena.
6887 * Some of the addresses we submit may get stored so we need
6888 * to run through the reply to see what the results were ...
6890 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6891 if (ret && ret != -FW_ENOMEM)
6894 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6895 u16 index = G_FW_VI_MAC_CMD_IDX(
6896 be16_to_cpu(p->valid_to_idx));
6899 idx[offset+i] = (index >= max_naddr
6902 if (index < max_naddr)
/* Overflow entries fall back to the 64-bit inexact hash filter. */
6905 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
6913 if (ret == 0 || ret == -FW_ENOMEM)
6919 * t4_change_mac - modifies the exact-match filter for a MAC address
6920 * @adap: the adapter
6921 * @mbox: mailbox to use for the FW command
6923 * @idx: index of existing filter for old value of MAC address, or -1
6924 * @addr: the new MAC address value
6925 * @persist: whether a new MAC allocation should be persistent
6926 * @add_smt: if true also add the address to the HW SMT
6928 * Modifies an exact-match filter and sets it to the new MAC address if
6929 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
6930 * latter case the address is added persistently if @persist is %true.
6932 * Note that in general it is not possible to modify the value of a given
6933 * filter so the generic way to modify an address filter is to free the one
6934 * being used by the old address value and allocate a new filter for the
6935 * new address value.
6937 * Returns a negative error number or the index of the filter with the new
6938 * MAC value. Note that this index may differ from @idx.
/*
 * NOTE(review): rewrites (or newly allocates, when @idx < 0) an
 * exact-match MPS TCAM entry for @addr on VI @viid.  On success the
 * firmware reply carries the actual filter index, which may differ from
 * the requested one.  The elided tail presumably converts an index
 * >= mps_tcam_size into an error return — TODO confirm.
 */
6940 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
6941 int idx, const u8 *addr, bool persist, bool add_smt)
6944 struct fw_vi_mac_cmd c;
6945 struct fw_vi_mac_exact *p = c.u.exact;
6946 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
/* Negative idx means "allocate"; persist selects a sticky entry. */
6948 if (idx < 0) /* new allocation */
6949 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
6950 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
6952 memset(&c, 0, sizeof(c));
6953 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6954 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6955 V_FW_VI_MAC_CMD_VIID(viid));
6956 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
6957 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
6958 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
6959 V_FW_VI_MAC_CMD_IDX(idx));
6960 memcpy(p->macaddr, addr, sizeof(p->macaddr));
6962 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success, report the index firmware actually used. */
6964 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
6965 if (ret >= max_mac_addr)
6972 * t4_set_addr_hash - program the MAC inexact-match hash filter
6973 * @adap: the adapter
6974 * @mbox: mailbox to use for the FW command
6976 * @ucast: whether the hash filter should also match unicast addresses
6977 * @vec: the value to be written to the hash filter
6978 * @sleep_ok: call is allowed to sleep
6980 * Sets the 64-bit inexact-match hash filter for a virtual interface.
6982 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
6983 bool ucast, u64 vec, bool sleep_ok)
6985 struct fw_vi_mac_cmd c;
6988 memset(&c, 0, sizeof(c));
6989 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
6990 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6991 V_FW_VI_ENABLE_CMD_VIID(viid));
6992 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
6993 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
6994 c.freemacs_to_len16 = cpu_to_be32(val);
6995 c.u.hash.hashvec = cpu_to_be64(vec);
6996 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7000 * t4_enable_vi_params - enable/disable a virtual interface
7001 * @adap: the adapter
7002 * @mbox: mailbox to use for the FW command
7004 * @rx_en: 1=enable Rx, 0=disable Rx
7005 * @tx_en: 1=enable Tx, 0=disable Tx
7006 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7008 * Enables/disables a virtual interface. Note that setting DCB Enable
7009 * only makes sense when enabling a Virtual Interface ...
7011 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7012 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7014 struct fw_vi_enable_cmd c;
7016 memset(&c, 0, sizeof(c));
7017 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7018 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7019 V_FW_VI_ENABLE_CMD_VIID(viid));
7020 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7021 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7022 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7024 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7028 * t4_enable_vi - enable/disable a virtual interface
7029 * @adap: the adapter
7030 * @mbox: mailbox to use for the FW command
7032 * @rx_en: 1=enable Rx, 0=disable Rx
7033 * @tx_en: 1=enable Tx, 0=disable Tx
7035 * Enables/disables a virtual interface. Note that setting DCB Enable
7036 * only makes sense when enabling a Virtual Interface ...
7038 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7039 bool rx_en, bool tx_en)
7041 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7045 * t4_identify_port - identify a VI's port by blinking its LED
7046 * @adap: the adapter
7047 * @mbox: mailbox to use for the FW command
7049 * @nblinks: how many times to blink LED at 2.5 Hz
7051 * Identifies a VI's port by blinking its LED.
7053 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7054 unsigned int nblinks)
7056 struct fw_vi_enable_cmd c;
7058 memset(&c, 0, sizeof(c));
7059 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7060 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7061 V_FW_VI_ENABLE_CMD_VIID(viid));
7062 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7063 c.blinkdur = cpu_to_be16(nblinks);
7064 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7068 * t4_iq_stop - stop an ingress queue and its FLs
7069 * @adap: the adapter
7070 * @mbox: mailbox to use for the FW command
7071 * @pf: the PF owning the queues
7072 * @vf: the VF owning the queues
7073 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7074 * @iqid: ingress queue id
7075 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7076 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7078 * Stops an ingress queue and its associated FLs, if any. This causes
7079 * any current or future data/messages destined for these queues to be
7082 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7083 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7084 unsigned int fl0id, unsigned int fl1id)
7088 memset(&c, 0, sizeof(c));
7089 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7090 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7091 V_FW_IQ_CMD_VFN(vf));
7092 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7093 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7094 c.iqid = cpu_to_be16(iqid);
7095 c.fl0id = cpu_to_be16(fl0id);
7096 c.fl1id = cpu_to_be16(fl1id);
7097 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7101 * t4_iq_free - free an ingress queue and its FLs
7102 * @adap: the adapter
7103 * @mbox: mailbox to use for the FW command
7104 * @pf: the PF owning the queues
7105 * @vf: the VF owning the queues
7106 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7107 * @iqid: ingress queue id
7108 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7109 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7111 * Frees an ingress queue and its associated FLs, if any.
7113 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7114 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7115 unsigned int fl0id, unsigned int fl1id)
7119 memset(&c, 0, sizeof(c));
7120 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7121 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7122 V_FW_IQ_CMD_VFN(vf));
7123 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7124 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7125 c.iqid = cpu_to_be16(iqid);
7126 c.fl0id = cpu_to_be16(fl0id);
7127 c.fl1id = cpu_to_be16(fl1id);
7128 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7132 * t4_eth_eq_free - free an Ethernet egress queue
7133 * @adap: the adapter
7134 * @mbox: mailbox to use for the FW command
7135 * @pf: the PF owning the queue
7136 * @vf: the VF owning the queue
7137 * @eqid: egress queue id
7139 * Frees an Ethernet egress queue.
7141 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7142 unsigned int vf, unsigned int eqid)
7144 struct fw_eq_eth_cmd c;
7146 memset(&c, 0, sizeof(c));
7147 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7148 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7149 V_FW_EQ_ETH_CMD_PFN(pf) |
7150 V_FW_EQ_ETH_CMD_VFN(vf));
7151 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7152 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7153 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7157 * t4_ctrl_eq_free - free a control egress queue
7158 * @adap: the adapter
7159 * @mbox: mailbox to use for the FW command
7160 * @pf: the PF owning the queue
7161 * @vf: the VF owning the queue
7162 * @eqid: egress queue id
7164 * Frees a control egress queue.
7166 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7167 unsigned int vf, unsigned int eqid)
7169 struct fw_eq_ctrl_cmd c;
7171 memset(&c, 0, sizeof(c));
7172 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7173 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7174 V_FW_EQ_CTRL_CMD_PFN(pf) |
7175 V_FW_EQ_CTRL_CMD_VFN(vf));
7176 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7177 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7178 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7182 * t4_ofld_eq_free - free an offload egress queue
7183 * @adap: the adapter
7184 * @mbox: mailbox to use for the FW command
7185 * @pf: the PF owning the queue
7186 * @vf: the VF owning the queue
7187 * @eqid: egress queue id
7189 * Frees a control egress queue.
7191 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7192 unsigned int vf, unsigned int eqid)
7194 struct fw_eq_ofld_cmd c;
7196 memset(&c, 0, sizeof(c));
7197 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7198 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7199 V_FW_EQ_OFLD_CMD_PFN(pf) |
7200 V_FW_EQ_OFLD_CMD_VFN(vf));
7201 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7202 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7203 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7207 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7208 * @link_down_rc: Link Down Reason Code
7210 * Returns a string representation of the Link Down Reason Code.
/*
 * NOTE(review): maps a firmware Link Down Reason Code to a human-readable
 * string.  Several table entries are elided in this view; the index of
 * each visible string must line up with the firmware's LINKDNRC encoding
 * — do not reorder.  Out-of-range codes map to "Bad Reason Code".
 */
7212 const char *t4_link_down_rc_str(unsigned char link_down_rc)
7214 static const char *reason[] = {
7217 "Auto-negotiation Failure",
7219 "Insufficient Airflow",
7220 "Unable To Determine Reason",
7221 "No RX Signal Detected",
7225 if (link_down_rc >= ARRAY_SIZE(reason))
7226 return "Bad Reason Code";
7228 return reason[link_down_rc];
7232 * t4_handle_fw_rpl - process a FW reply message
7233 * @adap: the adapter
7234 * @rpl: start of the FW message
7236 * Processes a FW message, such as link state change messages.
/*
 * NOTE(review): dispatches an async firmware reply.  Only
 * FW_PORT_CMD / GET_PORT_INFO (link or module state change) is handled:
 * the port is located by matching tx_chan, module changes call
 * t4_os_portmod_changed(), and link-state/speed/flow-control changes
 * update the cached link_config and call t4_os_link_changed().  All
 * other opcodes are logged (rate-limited) and presumably rejected —
 * TODO confirm the elided return paths.
 */
7238 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7240 u8 opcode = *(const u8 *)rpl;
7241 const struct fw_port_cmd *p = (const void *)rpl;
7242 unsigned int action =
7243 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7245 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7246 /* link/module state change message */
7247 int speed = 0, fc = 0, i;
7248 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7249 struct port_info *pi = NULL;
7250 struct link_config *lc;
7251 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7252 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7253 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* Decode pause settings and the highest advertised link speed. */
7255 if (stat & F_FW_PORT_CMD_RXPAUSE)
7257 if (stat & F_FW_PORT_CMD_TXPAUSE)
7259 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7261 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7263 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7265 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
/* Find the port_info whose Tx channel matches the reported port. */
7268 for_each_port(adap, i) {
7269 pi = adap2pinfo(adap, i);
7270 if (pi->tx_chan == chan)
7275 if (mod != pi->mod_type) {
7277 t4_os_portmod_changed(adap, i);
7279 if (link_ok != lc->link_ok || speed != lc->speed ||
7280 fc != lc->fc) { /* something changed */
/* Capture the Link Down Reason Code only on an up->down transition. */
7283 if (!link_ok && lc->link_ok)
7284 reason = G_FW_PORT_CMD_LINKDNRC(stat);
7288 lc->link_ok = link_ok;
7291 lc->supported = be16_to_cpu(p->u.info.pcap);
7292 t4_os_link_changed(adap, i, link_ok, reason);
7295 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7302 * get_pci_mode - determine a card's PCI mode
7303 * @adapter: the adapter
7304 * @p: where to store the PCI settings
7306 * Determines a card's PCI mode and associated parameters, such as speed
7309 static void get_pci_mode(struct adapter *adapter,
7310 struct pci_params *p)
7315 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Read Link Status: current link speed and negotiated link width. */
7317 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7318 p->speed = val & PCI_EXP_LNKSTA_CLS;
7319 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7324 * init_link_config - initialize a link's SW state
7325 * @lc: structure holding the link state
7326 * @caps: link capabilities
7328 * Initializes the SW state maintained for each link, including the link's
7329 * capabilities and default speed/flow-control/autonegotiation settings.
7331 static void init_link_config(struct link_config *lc, unsigned int caps)
7333 lc->supported = caps;
7334 lc->requested_speed = 0;
/* Default to symmetric pause in both directions. */
7336 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* Enable autoneg only if the port advertises the capability. */
7337 if (lc->supported & FW_PORT_CAP_ANEG) {
7338 lc->advertising = lc->supported & ADVERT_MASK;
7339 lc->autoneg = AUTONEG_ENABLE;
7340 lc->requested_fc |= PAUSE_AUTONEG;
7342 lc->advertising = 0;
7343 lc->autoneg = AUTONEG_DISABLE;
7348 u32 vendor_and_model_id;
7352 int t4_get_flash_params(struct adapter *adapter)
7355 * Table for non-Numonix supported flash parts. Numonix parts are left
7356 * to the preexisting well-tested code. All flash parts have 64KB
7359 static struct flash_desc supported_flash[] = {
7360 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
/* Issue the SF "read ID" opcode and read back the 3-byte JEDEC id. */
7366 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7368 ret = sf1_read(adapter, 3, 0, 1, &info);
7369 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* First try the explicit table of known non-Numonix parts. */
7373 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7374 if (supported_flash[ret].vendor_and_model_id == info) {
7375 adapter->params.sf_size = supported_flash[ret].size_mb;
7376 adapter->params.sf_nsec =
7377 adapter->params.sf_size / SF_SEC_SIZE;
7381 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7383 info >>= 16; /* log2 of size */
7384 if (info >= 0x14 && info < 0x18)
7385 adapter->params.sf_nsec = 1 << (info - 16);
7386 else if (info == 0x18)
7387 adapter->params.sf_nsec = 64;
7390 adapter->params.sf_size = 1 << info;
7393 * We should ~probably~ reject adapters with FLASHes which are too
7394 * small but we have some legacy FPGAs with small FLASHes that we'd
7395 * still like to use. So instead we emit a scary message ...
7397 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7398 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7399 adapter->params.sf_size, FLASH_MIN_SIZE);
/* Set the PCIe completion timeout field in Device Control 2 (read-modify-write). */
7404 static void set_pcie_completion_timeout(struct adapter *adapter,
7410 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7412 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7415 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
/* Return the per-generation (T4/T5/T6) constant parameter table, or NULL
 * for an unknown chip id. */
7419 static const struct chip_params *get_chip_params(int chipid)
7421 static const struct chip_params chip_params[] = {
7425 .pm_stats_cnt = PM_NSTATS,
7426 .cng_ch_bits_log = 2,
7428 .cim_num_obq = CIM_NUM_OBQ,
7429 .mps_rplc_size = 128,
7431 .sge_fl_db = F_DBPRIO,
7432 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7437 .pm_stats_cnt = PM_NSTATS,
7438 .cng_ch_bits_log = 2,
7440 .cim_num_obq = CIM_NUM_OBQ_T5,
7441 .mps_rplc_size = 128,
7443 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7444 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7449 .pm_stats_cnt = T6_PM_NSTATS,
7450 .cng_ch_bits_log = 3,
7452 .cim_num_obq = CIM_NUM_OBQ_T5,
7453 .mps_rplc_size = 256,
7456 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* Table is indexed from CHELSIO_T4; reject ids outside the table. */
7460 chipid -= CHELSIO_T4;
7461 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7464 return &chip_params[chipid];
7468 * t4_prep_adapter - prepare SW and HW for operation
7469 * @adapter: the adapter
7470 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7472 * Initialize adapter SW state for the various HW modules, set initial
7473 * values for some adapter tunables, take PHYs out of reset, and
7474 * initialize the MDIO interface.
7476 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7482 get_pci_mode(adapter, &adapter->params.pci);
7484 pl_rev = t4_read_reg(adapter, A_PL_REV);
7485 adapter->params.chipid = G_CHIPID(pl_rev);
7486 adapter->params.rev = G_REV(pl_rev);
7487 if (adapter->params.chipid == 0) {
7488 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7489 adapter->params.chipid = CHELSIO_T4;
7491 /* T4A1 chip is not supported */
7492 if (adapter->params.rev == 1) {
7493 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7498 adapter->chip_params = get_chip_params(chip_id(adapter));
7499 if (adapter->chip_params == NULL)
7502 adapter->params.pci.vpd_cap_addr =
7503 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7505 ret = t4_get_flash_params(adapter);
7509 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7513 /* Cards with real ASICs have the chipid in the PCIe device id */
7514 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7515 if (device_id >> 12 == chip_id(adapter))
7516 adapter->params.cim_la_size = CIMLA_SIZE;
/* Otherwise this is an FPGA, which uses a larger CIM LA buffer. */
7519 adapter->params.fpga = 1;
7520 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7523 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7526 * Default port and clock for debugging in case we can't reach FW.
7528 adapter->params.nports = 1;
7529 adapter->params.portvec = 1;
7530 adapter->params.vpd.cclk = 50000;
7532 /* Set pci completion timeout value to 4 seconds. */
7533 set_pcie_completion_timeout(adapter, 0xd);
7538 * t4_shutdown_adapter - shut down adapter, host & wire
7539 * @adapter: the adapter
7541 * Perform an emergency shutdown of the adapter and stop it from
7542 * continuing any further communication on the ports or DMA to the
7543 * host. This is typically used when the adapter and/or firmware
7544 * have crashed and we want to prevent any further accidental
7545 * communication with the rest of the world. This will also force
7546 * the port Link Status to go down -- if register writes work --
7547 * which should help our peers figure out that we're down.
7549 int t4_shutdown_adapter(struct adapter *adapter)
7553 t4_intr_disable(adapter);
7554 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
/* Clear SIGNAL_DET on each port to force link down on the wire. */
7555 for_each_port(adapter, port) {
7556 u32 a_port_cfg = PORT_REG(port,
7561 t4_write_reg(adapter, a_port_cfg,
7562 t4_read_reg(adapter, a_port_cfg)
7563 & ~V_SIGNAL_DET(1));
/* Finally stop all SGE DMA to/from the host. */
7565 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7571 * t4_init_devlog_params - initialize adapter->params.devlog
7572 * @adap: the adapter
7573 * @fw_attach: whether we can talk to the firmware
7575 * Initialize various fields of the adapter's Firmware Device Log
7576 * Parameters structure.
7578 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7580 struct devlog_params *dparams = &adap->params.devlog;
7582 unsigned int devlog_meminfo;
7583 struct fw_devlog_cmd devlog_cmd;
7586 /* If we're dealing with newer firmware, the Device Log Parameters
7587 * are stored in a designated register which allows us to access the
7588 * Device Log even if we can't talk to the firmware.
7591 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7593 unsigned int nentries, nentries128;
7595 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
7596 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
/* Entry count is encoded in units of 128 entries, biased by 1. */
7598 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7599 nentries = (nentries128 + 1) * 128;
7600 dparams->size = nentries * sizeof(struct fw_devlog_e);
7606 * For any failing returns ...
7608 memset(dparams, 0, sizeof *dparams);
7611 * If we can't talk to the firmware, there's really nothing we can do
7617 /* Otherwise, ask the firmware for its Device Log Parameters.
7619 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7620 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7621 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7622 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7623 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7629 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7630 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7631 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7632 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7638 * t4_init_sge_params - initialize adap->params.sge
7639 * @adapter: the adapter
7641 * Initialize various fields of the adapter's SGE Parameters structure.
7643 int t4_init_sge_params(struct adapter *adapter)
7646 struct sge_params *sp = &adapter->params.sge;
/* Interrupt packet-count thresholds. */
7648 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7649 sp->counter_val[0] = G_THRESHOLD_0(r);
7650 sp->counter_val[1] = G_THRESHOLD_1(r);
7651 sp->counter_val[2] = G_THRESHOLD_2(r);
7652 sp->counter_val[3] = G_THRESHOLD_3(r);
/* Interrupt holdoff timers, converted from core clock ticks to usecs. */
7654 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7655 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7656 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7657 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7658 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7659 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7660 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7661 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7662 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
7664 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7665 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7667 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7669 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7671 /* egress queues: log2 of # of doorbells per BAR2 page */
7672 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7673 r >>= S_QUEUESPERPAGEPF0 +
7674 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7675 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7677 /* ingress queues: log2 of # of doorbells per BAR2 page */
7678 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7679 r >>= S_QUEUESPERPAGEPF0 +
7680 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7681 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
/* Host page size for this PF; field encodes log2(page size) - 10. */
7683 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7684 r >>= S_HOSTPAGESIZEPF0 +
7685 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7686 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7688 r = t4_read_reg(adapter, A_SGE_CONTROL);
7689 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7690 sp->fl_pktshift = G_PKTSHIFT(r);
7691 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
7693 sp->pack_boundary = sp->pad_boundary;
7695 r = t4_read_reg(adapter, A_SGE_CONTROL2);
/* INGPACKBOUNDARY == 0 is a special encoding meaning 16 bytes. */
7696 if (G_INGPACKBOUNDARY(r) == 0)
7697 sp->pack_boundary = 16;
7699 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
7706 * Read and cache the adapter's compressed filter mode and ingress config.
7708 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7710 struct tp_params *tpp = &adap->params.tp;
/* Prefer firmware LDST access when available, else direct TP PIO reads. */
7712 if (t4_use_ldst(adap)) {
7713 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7714 A_TP_VLAN_PRI_MAP, 1);
7715 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7716 A_TP_INGRESS_CONFIG, 1);
7718 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7719 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7720 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7721 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7725 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7726 * shift positions of several elements of the Compressed Filter Tuple
7727 * for this adapter which we need frequently ...
7729 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
7730 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
7731 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
7732 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
7733 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
7734 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
7735 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
7736 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
7737 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
7738 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
7741 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7742 * represents the presence of an Outer VLAN instead of a VNIC ID.
7744 if ((tpp->ingress_config & F_VNIC) == 0)
7745 tpp->vnic_shift = -1;
7749 * t4_init_tp_params - initialize adap->params.tp
7750 * @adap: the adapter
7752 * Initialize various fields of the adapter's TP Parameters structure.
7754 int t4_init_tp_params(struct adapter *adap)
7758 struct tp_params *tpp = &adap->params.tp;
7760 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7761 tpp->tre = G_TIMERRESOLUTION(v);
7762 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7764 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7765 for (chan = 0; chan < MAX_NCHAN; chan++)
7766 tpp->tx_modq[chan] = chan;
7768 read_filter_mode_and_ingress_config(adap);
7771 * For T6, cache the adapter's compressed error vector
7772 * and passing outer header info for encapsulated packets.
7774 if (chip_id(adap) > CHELSIO_T5) {
7775 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
7776 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
7783 * t4_filter_field_shift - calculate filter field shift
7784 * @adap: the adapter
7785 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7787 * Return the shift position of a filter field within the Compressed
7788 * Filter Tuple. The filter field is specified via its selection bit
7789 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
7791 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7793 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7797 if ((filter_mode & filter_sel) == 0)
/* Sum the widths of all enabled fields below the requested one. */
7800 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
7801 switch (filter_mode & sel) {
7803 field_shift += W_FT_FCOE;
7806 field_shift += W_FT_PORT;
7809 field_shift += W_FT_VNIC_ID;
7812 field_shift += W_FT_VLAN;
7815 field_shift += W_FT_TOS;
7818 field_shift += W_FT_PROTOCOL;
7821 field_shift += W_FT_ETHERTYPE;
7824 field_shift += W_FT_MACMATCH;
7827 field_shift += W_FT_MPSHITTYPE;
7829 case F_FRAGMENTATION:
7830 field_shift += W_FT_FRAGMENTATION;
/* Initialize SW state for one port: query port info from FW, allocate its
 * first virtual interface, and record MAC address / RSS / module info. */
7837 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
7841 struct fw_port_cmd c;
7843 struct port_info *p = adap2pinfo(adap, port_id);
7846 memset(&c, 0, sizeof(c));
/* Map our port index to the j'th set bit of the FW port vector. */
7848 for (i = 0, j = -1; i <= p->port_id; i++) {
7851 } while ((adap->params.portvec & (1 << j)) == 0);
7854 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
7855 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7856 V_FW_PORT_CMD_PORTID(j));
7857 c.action_to_len16 = htonl(
7858 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7860 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7864 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
7868 p->vi[0].viid = ret;
7870 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
7872 p->vi[0].rss_size = rss_size;
7873 t4_os_set_hw_addr(adap, p->port_id, addr);
7875 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
7876 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
7877 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
7878 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
7879 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
7881 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
/* Ask FW for the VI's RSS slice base; fall back to 0xffff on failure. */
7883 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7884 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
7885 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
7886 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
7888 p->vi[0].rss_base = 0xffff;
7890 /* MPASS((val >> 16) == rss_size); */
7891 p->vi[0].rss_base = val & 0xffff;
7898 * t4_read_cimq_cfg - read CIM queue configuration
7899 * @adap: the adapter
7900 * @base: holds the queue base addresses in bytes
7901 * @size: holds the queue sizes in bytes
7902 * @thres: holds the queue full thresholds in bytes
7904 * Returns the current configuration of the CIM queues, starting with
7905 * the IBQs, then the OBQs.
7907 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
7910 int cim_num_obq = adap->chip_params->cim_num_obq;
7912 for (i = 0; i < CIM_NUM_IBQ; i++) {
7913 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
7915 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7916 /* value is in 256-byte units */
7917 *base++ = G_CIMQBASE(v) * 256;
7918 *size++ = G_CIMQSIZE(v) * 256;
7919 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
/* OBQs follow; they have no full-threshold field. */
7921 for (i = 0; i < cim_num_obq; i++) {
7922 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7924 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7925 /* value is in 256-byte units */
7926 *base++ = G_CIMQBASE(v) * 256;
7927 *size++ = G_CIMQSIZE(v) * 256;
7932 * t4_read_cim_ibq - read the contents of a CIM inbound queue
7933 * @adap: the adapter
7934 * @qid: the queue index
7935 * @data: where to store the queue contents
7936 * @n: capacity of @data in 32-bit words
7938 * Reads the contents of the selected CIM queue starting at address 0 up
7939 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7940 * error and the number of 32-bit words actually read on success.
7942 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7944 int i, err, attempts;
7946 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only IBQs 0-5 exist; n must be a multiple of 4 words. */
7948 if (qid > 5 || (n & 3))
7951 addr = qid * nwords;
7955 /* It might take 3-10ms before the IBQ debug read access is allowed.
7956 * Wait for 1 Sec with a delay of 1 usec.
7960 for (i = 0; i < n; i++, addr++) {
7961 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
7963 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
7967 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable debug access when done. */
7969 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
7974 * t4_read_cim_obq - read the contents of a CIM outbound queue
7975 * @adap: the adapter
7976 * @qid: the queue index
7977 * @data: where to store the queue contents
7978 * @n: capacity of @data in 32-bit words
7980 * Reads the contents of the selected CIM queue starting at address 0 up
7981 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7982 * error and the number of 32-bit words actually read on success.
7984 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7987 unsigned int addr, v, nwords;
7988 int cim_num_obq = adap->chip_params->cim_num_obq;
7990 if ((qid > (cim_num_obq - 1)) || (n & 3))
/* Look up the selected OBQ's base/size from the CIM queue config. */
7993 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
7994 V_QUENUMSELECT(qid));
7995 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
7997 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
7998 nwords = G_CIMQSIZE(v) * 64; /* same */
8002 for (i = 0; i < n; i++, addr++) {
8003 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8005 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8009 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
/* Disable debug access when done. */
8011 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Base offsets of regions within the CIM internal address space. */
8017 CIM_CTL_BASE = 0x2000,
8018 CIM_PBT_ADDR_BASE = 0x2800,
8019 CIM_PBT_LRF_BASE = 0x3000,
8020 CIM_PBT_DATA_BASE = 0x3800
8024 * t4_cim_read - read a block from CIM internal address space
8025 * @adap: the adapter
8026 * @addr: the start address within the CIM address space
8027 * @n: number of words to read
8028 * @valp: where to store the result
8030 * Reads a block of 4-byte words from the CIM internal address space.
8032 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if a previous host access is still in flight. */
8037 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8040 for ( ; !ret && n--; addr += 4) {
8041 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8042 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8045 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8051 * t4_cim_write - write a block into CIM internal address space
8052 * @adap: the adapter
8053 * @addr: the start address within the CIM address space
8054 * @n: number of words to write
8055 * @valp: set of values to write
8057 * Writes a block of 4-byte words into the CIM internal address space.
8059 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8060 const unsigned int *valp)
/* Bail out if a previous host access is still in flight. */
8064 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8067 for ( ; !ret && n--; addr += 4) {
8068 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8069 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8070 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single word into CIM address space. */
8076 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8079 return t4_cim_write(adap, addr, 1, &val);
8083 * t4_cim_ctl_read - read a block from CIM control region
8084 * @adap: the adapter
8085 * @addr: the start address within the CIM control region
8086 * @n: number of words to read
8087 * @valp: where to store the result
8089 * Reads a block of 4-byte words from the CIM control region.
8091 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8094 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8098 * t4_cim_read_la - read CIM LA capture buffer
8099 * @adap: the adapter
8100 * @la_buf: where to store the LA data
8101 * @wrptr: the HW write pointer within the capture buffer
8103 * Reads the contents of the CIM LA buffer with the most recent entry at
8104 * the end of the returned data and with the entry at @wrptr first.
8105 * We try to leave the LA in the running state we find it in.
8107 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8110 unsigned int cfg, val, idx;
8112 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8116 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8117 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8122 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8126 idx = G_UPDBGLAWRPTR(val);
/* Walk the whole LA, one entry per read-enable handshake. */
8130 for (i = 0; i < adap->params.cim_la_size; i++) {
8131 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8132 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8135 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* RDEN still set means the read hasn't completed. */
8138 if (val & F_UPDBGLARDEN) {
8142 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8146 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8147 idx = (idx + 1) & M_UPDBGLARDPTR;
8149 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8150 * identify the 32-bit portion of the full 312-bit data
8153 while ((idx & 0xf) > 9)
8154 idx = (idx + 1) % M_UPDBGLARDPTR;
/* Restore the LA to the running state we found it in. */
8157 if (cfg & F_UPDBGLAEN) {
8158 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8159 cfg & ~F_UPDBGLARDEN);
8167 * t4_tp_read_la - read TP LA capture buffer
8168 * @adap: the adapter
8169 * @la_buf: where to store the LA data
8170 * @wrptr: the HW write pointer within the capture buffer
8172 * Reads the contents of the TP LA buffer with the most recent entry at
8173 * the end of the returned data and with the entry at @wrptr first.
8174 * We leave the LA in the running state we find it in.
8176 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8178 bool last_incomplete;
8179 unsigned int i, cfg, val, idx;
8181 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8182 if (cfg & F_DBGLAENABLE) /* freeze LA */
8183 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8184 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE))
8186 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8187 idx = G_DBGLAWPTR(val);
/* In capture modes >= 2 the last entry may only be half-written. */
8188 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8189 if (last_incomplete)
8190 idx = (idx + 1) & M_DBGLARPTR;
8195 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8196 val |= adap->params.tp.la_mask;
8198 for (i = 0; i < TPLA_SIZE; i++) {
8199 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8200 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8201 idx = (idx + 1) & M_DBGLARPTR;
8204 /* Wipe out last entry if it isn't valid */
8205 if (last_incomplete)
8206 la_buf[TPLA_SIZE - 1] = ~0ULL;
8208 if (cfg & F_DBGLAENABLE) /* restore running state */
8209 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8210 cfg | adap->params.tp.la_mask);
8214 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8215 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8216 * state for more than the Warning Threshold then we'll issue a warning about
8217 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8218 * appears to be hung every Warning Repeat second till the situation clears.
8219 * If the situation clears, we'll note that as well.
8221 #define SGE_IDMA_WARN_THRESH 1
8222 #define SGE_IDMA_WARN_REPEAT 300
8225 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8226 * @adapter: the adapter
8227 * @idma: the adapter IDMA Monitor state
8229 * Initialize the state of an SGE Ingress DMA Monitor.
8231 void t4_idma_monitor_init(struct adapter *adapter,
8232 struct sge_idma_monitor_state *idma)
8234 /* Initialize the state variables for detecting an SGE Ingress DMA
8235 * hang. The SGE has internal counters which count up on each clock
8236 * tick whenever the SGE finds its Ingress DMA State Engines in the
8237 * same state they were on the previous clock tick. The clock used is
8238 * the Core Clock so we have a limit on the maximum "time" they can
8239 * record; typically a very small number of seconds. For instance,
8240 * with a 600MHz Core Clock, we can only count up to a bit more than
8241 * 7s. So we'll synthesize a larger counter in order to not run the
8242 * risk of having the "timers" overflow and give us the flexibility to
8243 * maintain a Hung SGE State Machine of our own which operates across
8244 * a longer time frame.
8246 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8247 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8251 * t4_idma_monitor - monitor SGE Ingress DMA state
8252 * @adapter: the adapter
8253 * @idma: the adapter IDMA Monitor state
8254 * @hz: number of ticks/second
8255 * @ticks: number of ticks since the last IDMA Monitor call
8257 void t4_idma_monitor(struct adapter *adapter,
8258 struct sge_idma_monitor_state *idma,
8261 int i, idma_same_state_cnt[2];
8263 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8264 * are counters inside the SGE which count up on each clock when the
8265 * SGE finds its Ingress DMA State Engines in the same states they
8266 * were in the previous clock. The counters will peg out at
8267 * 0xffffffff without wrapping around so once they pass the 1s
8268 * threshold they'll stay above that till the IDMA state changes.
8270 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8271 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8272 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* Check both IDMA channels independently. */
8274 for (i = 0; i < 2; i++) {
8275 u32 debug0, debug11;
8277 /* If the Ingress DMA Same State Counter ("timer") is less
8278 * than 1s, then we can reset our synthesized Stall Timer and
8279 * continue. If we have previously emitted warnings about a
8280 * potential stalled Ingress Queue, issue a note indicating
8281 * that the Ingress Queue has resumed forward progress.
8283 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8284 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8285 CH_WARN(adapter, "SGE idma%d, queue %u, "
8286 "resumed after %d seconds\n",
8287 i, idma->idma_qid[i],
8288 idma->idma_stalled[i]/hz)
8289 idma->idma_stalled[i] = 0;
8293 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8294 * domain. The first time we get here it'll be because we
8295 * passed the 1s Threshold; each additional time it'll be
8296 * because the RX Timer Callback is being fired on its regular
8299 * If the stall is below our Potential Hung Ingress Queue
8300 * Warning Threshold, continue.
8302 if (idma->idma_stalled[i] == 0) {
8303 idma->idma_stalled[i] = hz;
8304 idma->idma_warn[i] = 0;
8306 idma->idma_stalled[i] += ticks;
8307 idma->idma_warn[i] -= ticks;
8310 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8313 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8315 if (idma->idma_warn[i] > 0)
8317 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8319 /* Read and save the SGE IDMA State and Queue ID information.
8320 * We do this every time in case it changes across time ...
8321 * can't be too careful ...
8323 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8324 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8325 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8327 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8328 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8329 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8331 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8332 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8333 i, idma->idma_qid[i], idma->idma_state[i],
8334 idma->idma_stalled[i]/hz,
8336 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8341 * t4_read_pace_tbl - read the pace table
8342 * @adap: the adapter
8343 * @pace_vals: holds the returned values
8345 * Returns the values of TP's pace table in microseconds.
8347 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8351 for (i = 0; i < NTX_SCHED; i++) {
/* Select entry i for reading, then convert ticks to usecs. */
8352 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8353 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8354 pace_vals[i] = dack_ticks_to_usec(adap, v);
8359 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8360 * @adap: the adapter
8361 * @sched: the scheduler index
8362 * @kbps: the byte rate in Kbps
8363 * @ipg: the interpacket delay in tenths of nanoseconds
8365 * Return the current configuration of a HW Tx scheduler.
8367 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8370 unsigned int v, addr, bpt, cpt;
/* Each TM PIO register holds rate-limit config for a pair of schedulers. */
8373 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8374 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8375 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8378 bpt = (v >> 8) & 0xff;
8381 *kbps = 0; /* scheduler disabled */
8383 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8384 *kbps = (v * bpt) / 125;
/* Inter-packet gap lives in the paired TIMER_SEPARATOR register. */
8388 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8389 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8390 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8394 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8399 * t4_load_cfg - download config file
8400 * @adap: the adapter
8401 * @cfg_data: the cfg text file to write
8402 * @size: text file size
8404 * Write the supplied config text file to the card's serial flash.
8406 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8408 int ret, i, n, cfg_addr;
8410 unsigned int flash_cfg_start_sec;
8411 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8413 cfg_addr = t4_flash_cfg_addr(adap);
8418 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8420 if (size > FLASH_CFG_MAX_SIZE) {
8421 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8422 FLASH_CFG_MAX_SIZE);
/* Erase all sectors the config region spans before writing. */
8426 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8428 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8429 flash_cfg_start_sec + i - 1);
8431 * If size == 0 then we're simply erasing the FLASH sectors associated
8432 * with the on-adapter Firmware Configuration File.
8434 if (ret || size == 0)
8437 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8438 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8439 if ( (size - i) < SF_PAGE_SIZE)
8443 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8447 addr += SF_PAGE_SIZE;
8448 cfg_data += SF_PAGE_SIZE;
8453 CH_ERR(adap, "config file %s failed %d\n",
8454 (size == 0 ? "clear" : "download"), ret);
8459 * t5_fw_init_extern_mem - initialize the external memory
8460 * @adap: the adapter
8462 * Initializes the external memory on T5.
8464 int t5_fw_init_extern_mem(struct adapter *adap)
8466 	u32 params[1], val[1];
	/*
	 * Ask the firmware (DEV_MCINIT parameter) to initialize all external
	 * memory controllers; 0xff is a mask selecting every MC.  Memory
	 * initialization can be slow, so use the maximum command timeout.
	 */
8472 	val[0] = 0xff; /* Initialize all MCs */
8473 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8474 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8475 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8476 			FW_CMD_MAX_TIMEOUT);
8481 /* BIOS boot headers */
/*
 * On-disk layouts of the PCI expansion-ROM headers found at the start of a
 * boot image.  All multi-byte fields are declared as u8 arrays; readers use
 * le16_to_cpu()/le32_to_cpu() on them, so the structs carry the raw
 * little-endian bytes without alignment assumptions.
 */
8482 typedef struct pci_expansion_rom_header {
8483 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
8484 	u8	reserved[22]; /* Reserved per processor Architecture data */
8485 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
8486 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8488 /* Legacy PCI Expansion ROM Header */
8489 typedef struct legacy_pci_expansion_rom_header {
8490 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
8491 	u8	size512; /* Current Image Size in units of 512 bytes */
8492 	u8	initentry_point[4];
8493 	u8	cksum; /* Checksum computed on the entire Image */
8494 	u8	reserved[16]; /* Reserved */
8495 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
8496 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8498 /* EFI PCI Expansion ROM Header */
8499 typedef struct efi_pci_expansion_rom_header {
8500 	u8	signature[2]; // ROM signature. The value 0xaa55
8501 	u8	initialization_size[2]; /* Units 512. Includes this header */
8502 	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8503 	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
8504 	u8	efi_machine_type[2]; /* Machine type from EFI image header */
8505 	u8	compression_type[2]; /* Compression type. */
8507 		 * Compression type definition
8510 		 * 0x2-0xFFFF: Reserved
8512 	u8	reserved[8]; /* Reserved */
8513 	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
8514 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
8515 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8517 /* PCI Data Structure Format */
8518 typedef struct pcir_data_structure { /* PCI Data Structure */
8519 	u8	signature[4]; /* Signature. The string "PCIR" */
8520 	u8	vendor_id[2]; /* Vendor Identification */
8521 	u8	device_id[2]; /* Device Identification */
8522 	u8	vital_product[2]; /* Pointer to Vital Product Data */
8523 	u8	length[2]; /* PCIR Data Structure Length */
8524 	u8	revision; /* PCIR Data Structure Revision */
8525 	u8	class_code[3]; /* Class Code */
8526 	u8	image_length[2]; /* Image Length. Multiple of 512B */
8527 	u8	code_revision[2]; /* Revision Level of Code/Data */
8528 	u8	code_type; /* Code Type. */
8530 		 * PCI Expansion ROM Code Types
8531 		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8532 		 * 0x01: Open Firmware standard for PCI. FCODE
8533 		 * 0x02: Hewlett-Packard PA RISC. HP reserved
8534 		 * 0x03: EFI Image. EFI
8535 		 * 0x04-0xFF: Reserved.
8537 	u8	indicator; /* Indicator. Identifies the last image in the ROM */
8538 	u8	reserved[2]; /* Reserved */
8539 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8541 /* BOOT constants */
8543 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8544 	BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8545 	BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8546 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8547 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8548 	VENDOR_ID = 0x1425, /* Vendor ID */
8549 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8553 * modify_device_id - Modifies the device ID of the Boot BIOS image
8554  * @device_id: the device ID to write.
8555 * @boot_data: the boot image to modify.
8557 * Write the supplied device ID to the boot BIOS image.
8559 static void modify_device_id(int device_id, u8 *boot_data)
8561 	legacy_pci_exp_rom_header_t *header;
8562 	pcir_data_t *pcir_header;
8566 	 * Loop through all chained images and change the device ID's
	/* cur_header is the byte offset of the current image within boot_data. */
8569 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8570 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
8571 			      le16_to_cpu(*(u16*)header->pcir_offset)];
8574 		 * Only modify the Device ID if code type is Legacy or HP.
8575 		 * 0x00: Okay to modify
8576 		 * 0x01: FCODE. Do not modify
8577 		 * 0x03: Okay to modify
8578 		 * 0x04-0xFF: Do not modify
8580 		if (pcir_header->code_type == 0x00) {
8585 			 * Modify Device ID to match current adapter
			/*
			 * NOTE(review): the store below is in host byte order
			 * while reads elsewhere use le16_to_cpu() on this
			 * field — only equivalent on little-endian hosts;
			 * confirm intended behavior on big-endian.
			 */
8587 			*(u16*) pcir_header->device_id = device_id;
8590 			 * Set checksum temporarily to 0.
8591 			 * We will recalculate it later.
8593 			header->cksum = 0x0;
8596 			 * Calculate and update checksum
			/* Legacy image length is size512 * 512 bytes. */
8598 			for (i = 0; i < (header->size512 * 512); i++)
8599 				csum += (u8)boot_data[cur_header + i];
8602 			 * Invert summed value to create the checksum
8603 			 * Writing new checksum value directly to the boot data
			/* Offset 7 is the cksum field of the legacy header. */
8605 			boot_data[cur_header + 7] = -csum;
8607 		} else if (pcir_header->code_type == 0x03) {
8610 			 * Modify Device ID to match current adapter
8612 			*(u16*) pcir_header->device_id = device_id;
8618 		 * Check indicator element to identify if this is the last
		/* Bit 7 of the indicator marks the final image in the ROM. */
8621 		if (pcir_header->indicator & 0x80)
8625 		 * Move header pointer up to the next image in the ROM.
8627 		cur_header += header->size512 * 512;
8632 * t4_load_boot - download boot flash
8633 * @adapter: the adapter
8634 * @boot_data: the boot image to write
8635 * @boot_addr: offset in flash to write boot_data
8638 * Write the supplied boot image to the card's serial flash.
8639 * The boot image has the following sections: a 28-byte header and the
8642 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8643 		 unsigned int boot_addr, unsigned int size)
8645 	pci_exp_rom_header_t *header;
8647 	pcir_data_t *pcir_header;
	/* boot_addr is given in 1KB units; convert to a byte offset. */
8651 	unsigned int boot_sector = (boot_addr * 1024 );
8652 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8655 	 * Make sure the boot image does not encroach on the firmware region
	/*
	 * NOTE(review): ">> 16" hard-codes 64KB flash sectors here and below,
	 * even though sf_sec_size is computed above — confirm these agree on
	 * all supported parts.
	 */
8657 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8658 		CH_ERR(adap, "boot image encroaching on firmware region\n");
8663 	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8664 	 * and Boot configuration data sections. These 3 boot sections span
8665 	 * sectors 0 to 7 in flash and live right before the FW image location.
8667 	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8669 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8670 				     (boot_sector >> 16) + i - 1);
8673 	 * If size == 0 then we're simply erasing the FLASH sectors associated
8674 	 * with the on-adapter option ROM file
8676 	if (ret || (size == 0))
8679 	/* Get boot header */
8680 	header = (pci_exp_rom_header_t *)boot_data;
8681 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8682 	/* PCIR Data Structure */
8683 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8686 	 * Perform some primitive sanity testing to avoid accidentally
8687 	 * writing garbage over the boot sectors. We ought to check for
8688 	 * more but it's not worth it for now ...
8690 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8691 		CH_ERR(adap, "boot image too small/large\n");
	/* Signature checks are skipped in diagnostics builds. */
8695 #ifndef CHELSIO_T4_DIAGS
8697 	 * Check BOOT ROM header signature
8699 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8700 		CH_ERR(adap, "Boot image missing signature\n");
8705 	 * Check PCI header signature
8707 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8708 		CH_ERR(adap, "PCI header missing signature\n");
8713 	 * Check Vendor ID matches Chelsio ID
8715 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8716 		CH_ERR(adap, "Vendor ID missing signature\n");
8722 	 * Retrieve adapter's device ID
8724 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8725 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
8726 	device_id = device_id & 0xf0ff;
8729 	 * Check PCIE Device ID
8731 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8733 		 * Change the device ID in the Boot BIOS image to match
8734 		 * the Device ID of the current adapter.
8736 		modify_device_id(device_id, boot_data);
8740 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8741 	 * we finish copying the rest of the boot image. This will ensure
8742 	 * that the BIOS boot header will only be written if the boot image
8743 	 * was written in full.
8746 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8747 		addr += SF_PAGE_SIZE;
8748 		boot_data += SF_PAGE_SIZE;
8749 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
	/* Finally write the first page, containing the boot header. */
8754 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8755 			     (const u8 *)header, 0);
8759 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
8764 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
8765 * @adapter: the adapter
8767 * Return the address within the flash where the OptionROM Configuration
8768 * is stored, or an error if the device FLASH is too small to contain
8769 * a OptionROM Configuration.
8771 static int t4_flash_bootcfg_addr(struct adapter *adapter)
8774 	 * If the device FLASH isn't large enough to hold a Firmware
8775 	 * Configuration File, return an error.
	/* The whole bootcfg region must fit within the flash part. */
8777 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
8780 	return FLASH_BOOTCFG_START;
/*
 * Write the supplied OptionROM Configuration file to the card's serial
 * flash (mirrors t4_load_cfg, but for the bootcfg region).  A size of 0
 * just erases the region.
 */
8783 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
8785 	int ret, i, n, cfg_addr;
8787 	unsigned int flash_cfg_start_sec;
8788 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	/* Locate the bootcfg region (fails if the flash is too small). */
8790 	cfg_addr = t4_flash_bootcfg_addr(adap);
8795 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
8797 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
8798 		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
8799 			FLASH_BOOTCFG_MAX_SIZE);
	/* Erase every sector the (maximum-size) bootcfg region spans. */
8803 	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
8805 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8806 			flash_cfg_start_sec + i - 1);
8809 	 * If size == 0 then we're simply erasing the FLASH sectors associated
8810 	 * with the on-adapter OptionROM Configuration File.
8812 	if (ret || size == 0)
8815 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
8816 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8817 		if ( (size - i) < SF_PAGE_SIZE)
		/* n is the chunk length for this iteration (<= SF_PAGE_SIZE). */
8821 		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
8825 		addr += SF_PAGE_SIZE;
8826 		cfg_data += SF_PAGE_SIZE;
	/* Single exit point: report whether the clear/download succeeded. */
8831 		CH_ERR(adap, "boot config data %s failed %d\n",
8832 				(size == 0 ? "clear" : "download"), ret);
8837 * t4_set_filter_mode - configure the optional components of filter tuples
8838 * @adap: the adapter
8839  * @mode_map: a bitmap selecting which optional filter components to enable
8841 * Sets the filter mode by selecting the optional components to enable
8842 * in filter tuples. Returns 0 on success and a negative error if the
8843 * requested mode needs more bits than are available for optional
8846 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
	/* Bit widths of each optional tuple field, indexed S_FCOE..S_FRAGMENTATION. */
8848 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
	/* Sum the widths of all requested fields. */
8852 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
8853 		if (mode_map & (1 << i))
	/* Reject modes that need more optional-filter bits than exist. */
8855 	if (nbits > FILTER_OPT_LEN)
	/* Write TP_VLAN_PRI_MAP via firmware LDST when required, else directly. */
8857 	if (t4_use_ldst(adap))
8858 		t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
8860 		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
8861 				  1, A_TP_VLAN_PRI_MAP);
	/* Refresh the driver's cached copy of the filter mode. */
8862 	read_filter_mode_and_ingress_config(adap);
8868 * t4_clr_port_stats - clear port statistics
8869 * @adap: the adapter
8870 * @idx: the port index
8872 * Clear HW statistics for the given port.
8874 void t4_clr_port_stats(struct adapter *adap, int idx)
	/* Buffer-group map for this port: which of the 4 BGs it uses. */
8877 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	/* Per-port MPS stat base differs between T4 and later chips. */
8881 		port_base_addr = PORT_BASE(idx);
8883 		port_base_addr = T5_PORT_BASE(idx);
	/* Zero all TX port counters (registers are 64-bit, hence += 8). */
8885 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
8886 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
8887 		t4_write_reg(adap, port_base_addr + i, 0);
	/* Zero all RX port counters. */
8888 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
8889 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
8890 		t4_write_reg(adap, port_base_addr + i, 0);
	/* Zero the drop/truncate counters of each buffer group this port owns. */
8891 	for (i = 0; i < 4; i++)
8892 		if (bgmap & (1 << i)) {
8894 			    A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
8896 			    A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
8901 * t4_i2c_rd - read I2C data from adapter
8902 * @adap: the adapter
8903 * @port: Port number if per-port device; <0 if not
8904 * @devid: per-port device ID or absolute device ID
8905 * @offset: byte offset into device I2C space
8906 * @len: byte length of I2C space data
8907 * @buf: buffer in which to return I2C data
8909 * Reads the I2C data from the indicated device and location.
8911 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
8912 	      int port, unsigned int devid,
8913 	      unsigned int offset, unsigned int len,
8917 	struct fw_ldst_cmd ldst;
	/* Reject requests larger than the command's inline data buffer. */
8923 	    len > sizeof ldst.u.i2c.data)
	/* Build an FW_LDST read command addressed to the I2C space. */
8926 	memset(&ldst, 0, sizeof ldst);
8927 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8928 	ldst.op_to_addrspace =
8929 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8933 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* pid 0xff means "not a per-port device". */
8934 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8935 	ldst.u.i2c.did = devid;
8936 	ldst.u.i2c.boffset = offset;
8937 	ldst.u.i2c.blen = len;
	/* Issue the command; on success copy the returned bytes out. */
8938 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8940 		memcpy(buf, ldst.u.i2c.data, len);
8945 * t4_i2c_wr - write I2C data to adapter
8946 * @adap: the adapter
8947 * @port: Port number if per-port device; <0 if not
8948 * @devid: per-port device ID or absolute device ID
8949 * @offset: byte offset into device I2C space
8950 * @len: byte length of I2C space data
8951 * @buf: buffer containing new I2C data
8953 * Write the I2C data to the indicated device and location.
8955 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
8956 	      int port, unsigned int devid,
8957 	      unsigned int offset, unsigned int len,
8961 	struct fw_ldst_cmd ldst;
	/* Reject requests larger than the command's inline data buffer. */
8966 	    len > sizeof ldst.u.i2c.data)
	/* Build an FW_LDST write command addressed to the I2C space. */
8969 	memset(&ldst, 0, sizeof ldst);
8970 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
8971 	ldst.op_to_addrspace =
8972 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8976 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* pid 0xff means "not a per-port device". */
8977 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
8978 	ldst.u.i2c.did = devid;
8979 	ldst.u.i2c.boffset = offset;
8980 	ldst.u.i2c.blen = len;
	/* Copy the payload inline and issue the command. */
8981 	memcpy(ldst.u.i2c.data, buf, len);
8982 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
8986 * t4_sge_ctxt_rd - read an SGE context through FW
8987 * @adap: the adapter
8988 * @mbox: mailbox to use for the FW command
8989 * @cid: the context id
8990 * @ctype: the context type
8991 * @data: where to store the context data
8993 * Issues a FW command through the given mailbox to read an SGE context.
8995 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
8996 		   enum ctxt_type ctype, u32 *data)
8999 	struct fw_ldst_cmd c;
	/*
	 * Map the context type to the firmware LDST address space.
	 * "ret" is reused here as a temporary before it holds the
	 * mailbox return code below.
	 */
9001 	if (ctype == CTXT_EGRESS)
9002 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
9003 	else if (ctype == CTXT_INGRESS)
9004 		ret = FW_LDST_ADDRSPC_SGE_INGC;
9005 	else if (ctype == CTXT_FLM)
9006 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9008 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
	/* Build and send an FW_LDST read for the given context id. */
9010 	memset(&c, 0, sizeof(c));
9011 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9012 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
9013 					V_FW_LDST_CMD_ADDRSPACE(ret));
9014 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9015 	c.u.idctxt.physid = cpu_to_be32(cid);
9017 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	/* On success, unpack the six 32-bit context words for the caller. */
9019 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9020 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9021 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9022 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9023 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9024 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9030 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9031 * @adap: the adapter
9032 * @cid: the context id
9033 * @ctype: the context type
9034 * @data: where to store the context data
9036 * Reads an SGE context directly, bypassing FW. This is only for
9037 * debugging when FW is unavailable.
9039 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
	/* Kick off the context read directly in hardware. */
9044 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	/* Poll for the BUSY bit to clear (3 attempts, 1us apart). */
9045 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	/* Read the six 32-bit data registers (addresses step by 4 bytes). */
9047 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9048 			*data++ = t4_read_reg(adap, i);
/*
 * Issue an FW_SCHED_CMD "config" sub-command to set the scheduler type
 * and min/max enable, via the adapter's own mailbox.
 */
9052 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9055 	struct fw_sched_cmd cmd;
9057 	memset(&cmd, 0, sizeof(cmd));
9058 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9061 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9063 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9064 	cmd.u.config.type = type;
9065 	cmd.u.config.minmaxen = minmaxen;
9067 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * Issue an FW_SCHED_CMD "params" sub-command to program one traffic
 * class (channel/class, rate unit & mode, min/max rates, weight and
 * packet size).  Multi-byte rate fields are sent big-endian.
 */
9071 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9072 		    int rateunit, int ratemode, int channel, int cl,
9073 		    int minrate, int maxrate, int weight, int pktsize,
9076 	struct fw_sched_cmd cmd;
9078 	memset(&cmd, 0, sizeof(cmd));
9079 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9082 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9084 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9085 	cmd.u.params.type = type;
9086 	cmd.u.params.level = level;
9087 	cmd.u.params.mode = mode;
9088 	cmd.u.params.ch = channel;
9089 	cmd.u.params.cl = cl;
9090 	cmd.u.params.unit = rateunit;
9091 	cmd.u.params.rate = ratemode;
9092 	cmd.u.params.min = cpu_to_be32(minrate);
9093 	cmd.u.params.max = cpu_to_be32(maxrate);
9094 	cmd.u.params.weight = cpu_to_be16(weight);
9095 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
9097 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9102 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9103 * @adapter: the adapter
9104 * @mbox: mailbox to use for the FW command
9105 * @pf: the PF owning the queue
9106 * @vf: the VF owning the queue
9107 * @timeout: watchdog timeout in ms
9108 * @action: watchdog timer / action
9110 * There are separate watchdog timers for each possible watchdog
9111 * action. Configure one of the watchdog timers by setting a non-zero
9112 * timeout. Disable a watchdog timer by using a timeout of zero.
9114 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9115 		       unsigned int pf, unsigned int vf,
9116 		       unsigned int timeout, unsigned int action)
9118 	struct fw_watchdog_cmd wdog;
9122 	 * The watchdog command expects a timeout in units of 10ms so we need
9123 	 * to convert it here (via rounding) and force a minimum of one 10ms
9124 	 * "tick" if the timeout is non-zero but the conversion results in 0
	/* Round ms to the nearest 10ms tick ... */
9127 	ticks = (timeout + 5)/10;
	/* ... but never round a non-zero timeout down to "disabled". */
9128 	if (timeout && !ticks)
	/* Build the FW_WATCHDOG_CMD targeted at the given PF/VF. */
9131 	memset(&wdog, 0, sizeof wdog);
9132 	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9135 				     V_FW_PARAMS_CMD_PFN(pf) |
9136 				     V_FW_PARAMS_CMD_VFN(vf));
9137 	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9138 	wdog.timeout = cpu_to_be32(ticks);
9139 	wdog.action = cpu_to_be32(action);
9141 	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
/*
 * Read the firmware device-log level via an FW_DEVLOG_CMD read and
 * return it through *level.
 */
9144 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9146 	struct fw_devlog_cmd devlog_cmd;
9149 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9150 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9151 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
9152 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9153 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9154 			 sizeof(devlog_cmd), &devlog_cmd);
	/* *level is presumably only set after ret is checked — the check
	 * lies in lines not visible here; confirm against the full file. */
9158 	*level = devlog_cmd.level;
9162 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9164 struct fw_devlog_cmd devlog_cmd;
9166 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9167 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9170 devlog_cmd.level = level;
9171 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9172 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9173 sizeof(devlog_cmd), &devlog_cmd);