2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
39 #include "t4_regs_values.h"
40 #include "firmware/t4fw_interface.h"
43 #define msleep(x) do { \
47 pause("t4hw", (x) * hz / 1000); \
51 * t4_wait_op_done_val - wait until an operation is completed
52 * @adapter: the adapter performing the operation
53 * @reg: the register to check for completion
54 * @mask: a single-bit field within @reg that indicates completion
55 * @polarity: the value of the field when the operation is completed
56 * @attempts: number of check iterations
57 * @delay: delay in usecs between iterations
58 * @valp: where to store the value of the register at completion time
60 * Wait until an operation is completed by checking a bit in a register
61 * up to @attempts times. If @valp is not NULL the value of the register
62 * at the time it indicated completion is stored there. Returns 0 if the
63 * operation completes and -EAGAIN otherwise.
65 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
66 int polarity, int attempts, int delay, u32 *valp)
69 u32 val = t4_read_reg(adapter, reg);
71 if (!!(val & mask) == polarity) {
83 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
84 int polarity, int attempts, int delay)
86 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
91 * t4_set_reg_field - set a register field to a value
92 * @adapter: the adapter to program
93 * @addr: the register address
94 * @mask: specifies the portion of the register to modify
95 * @val: the new value for the register field
97 * Sets a register field specified by the supplied mask to the
100 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
103 u32 v = t4_read_reg(adapter, addr) & ~mask;
105 t4_write_reg(adapter, addr, v | val);
106 (void) t4_read_reg(adapter, addr); /* flush */
110 * t4_read_indirect - read indirectly addressed registers
112 * @addr_reg: register holding the indirect address
113 * @data_reg: register holding the value of the indirect register
114 * @vals: where the read register values are stored
115 * @nregs: how many indirect registers to read
116 * @start_idx: index of first indirect register to read
118 * Reads registers that are accessed indirectly through an address/data
121 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
122 unsigned int data_reg, u32 *vals,
123 unsigned int nregs, unsigned int start_idx)
126 t4_write_reg(adap, addr_reg, start_idx);
127 *vals++ = t4_read_reg(adap, data_reg);
133 * t4_write_indirect - write indirectly addressed registers
135 * @addr_reg: register holding the indirect addresses
136 * @data_reg: register holding the value for the indirect registers
137 * @vals: values to write
138 * @nregs: how many indirect registers to write
139 * @start_idx: address of first indirect register to write
141 * Writes a sequential block of registers that are accessed indirectly
142 * through an address/data register pair.
144 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
145 unsigned int data_reg, const u32 *vals,
146 unsigned int nregs, unsigned int start_idx)
149 t4_write_reg(adap, addr_reg, start_idx++);
150 t4_write_reg(adap, data_reg, *vals++);
155 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
156 * mechanism. This guarantees that we get the real value even if we're
157 * operating within a Virtual Machine and the Hypervisor is trapping our
158 * Configuration Space accesses.
160 * N.B. This routine should only be used as a last resort: the firmware uses
161 * the backdoor registers on a regular basis and we can end up
162 * conflicting with it's uses!
164 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
166 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
169 if (chip_id(adap) <= CHELSIO_T5)
177 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
178 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
181 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
182 * Configuration Space read. (None of the other fields matter when
183 * F_ENABLE is 0 so a simple register write is easier than a
184 * read-modify-write via t4_set_reg_field().)
186 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
192 * t4_report_fw_error - report firmware error
195 * The adapter firmware can indicate error conditions to the host.
196 * If the firmware has indicated an error, print out the reason for
197 * the firmware error.
199 static void t4_report_fw_error(struct adapter *adap)
201 static const char *const reason[] = {
202 "Crash", /* PCIE_FW_EVAL_CRASH */
203 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
204 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
205 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
206 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
207 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
208 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
209 "Reserved", /* reserved */
213 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
214 if (pcie_fw & F_PCIE_FW_ERR)
215 CH_ERR(adap, "Firmware reports adapter error: %s\n",
216 reason[G_PCIE_FW_EVAL(pcie_fw)]);
220 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
222 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
225 for ( ; nflit; nflit--, mbox_addr += 8)
226 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
230 * Handle a FW assertion reported in a mailbox.
232 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
235 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
236 asrt->u.assert.filename_0_7,
237 be32_to_cpu(asrt->u.assert.line),
238 be32_to_cpu(asrt->u.assert.x),
239 be32_to_cpu(asrt->u.assert.y));
242 #define X_CIM_PF_NOACCESS 0xeeeeeeee
244 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
246 * @mbox: index of the mailbox to use
247 * @cmd: the command to write
248 * @size: command length in bytes
249 * @rpl: where to optionally store the reply
250 * @sleep_ok: if true we may sleep while awaiting command completion
251 * @timeout: time to wait for command to finish before timing out
252 * (negative implies @sleep_ok=false)
254 * Sends the given command to FW through the selected mailbox and waits
255 * for the FW to execute the command. If @rpl is not %NULL it is used to
256 * store the FW's reply to the command. The command and its optional
257 * reply are of the same length. Some FW commands like RESET and
258 * INITIALIZE can take a considerable amount of time to execute.
259 * @sleep_ok determines whether we may sleep while awaiting the response.
260 * If sleeping is allowed we use progressive backoff otherwise we spin.
261 * Note that passing in a negative @timeout is an alternate mechanism
262 * for specifying @sleep_ok=false. This is useful when a higher level
263 * interface allows for specification of @timeout but not @sleep_ok ...
265 * The return value is 0 on success or a negative errno on failure. A
266 * failure can happen either because we are not able to execute the
267 * command or FW executes it but signals an error. In the latter case
268 * the return value is the error code indicated by FW (negated).
270 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
271 int size, void *rpl, bool sleep_ok, int timeout)
274 * We delay in small increments at first in an effort to maintain
275 * responsiveness for simple, fast executing commands but then back
276 * off to larger delays to a maximum retry delay.
278 static const int delay[] = {
279 1, 1, 3, 5, 10, 10, 20, 50, 100
283 int i, ms, delay_idx, ret;
284 const __be64 *p = cmd;
285 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
286 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
288 __be64 cmd_rpl[MBOX_LEN/8];
291 if (adap->flags & CHK_MBOX_ACCESS)
292 ASSERT_SYNCHRONIZED_OP(adap);
294 if ((size & 15) || size > MBOX_LEN)
297 if (adap->flags & IS_VF) {
299 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
301 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
302 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
306 * If we have a negative timeout, that implies that we can't sleep.
314 * Attempt to gain access to the mailbox.
316 for (i = 0; i < 4; i++) {
317 ctl = t4_read_reg(adap, ctl_reg);
319 if (v != X_MBOWNER_NONE)
324 * If we were unable to gain access, dequeue ourselves from the
325 * mailbox atomic access list and report the error to our caller.
327 if (v != X_MBOWNER_PL) {
328 t4_report_fw_error(adap);
329 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
334 * If we gain ownership of the mailbox and there's a "valid" message
335 * in it, this is likely an asynchronous error message from the
336 * firmware. So we'll report that and then proceed on with attempting
337 * to issue our own command ... which may well fail if the error
338 * presaged the firmware crashing ...
340 if (ctl & F_MBMSGVALID) {
341 CH_ERR(adap, "found VALID command in mbox %u: %016llx %016llx "
342 "%016llx %016llx %016llx %016llx %016llx %016llx\n",
343 mbox, (unsigned long long)t4_read_reg64(adap, data_reg),
344 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
345 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
346 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
347 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
348 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
349 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
350 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
354 * Copy in the new mailbox command and send it on its way ...
356 for (i = 0; i < size; i += 8, p++)
357 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
359 if (adap->flags & IS_VF) {
361 * For the VFs, the Mailbox Data "registers" are
362 * actually backed by T4's "MA" interface rather than
363 * PL Registers (as is the case for the PFs). Because
364 * these are in different coherency domains, the write
365 * to the VF's PL-register-backed Mailbox Control can
366 * race in front of the writes to the MA-backed VF
367 * Mailbox Data "registers". So we need to do a
368 * read-back on at least one byte of the VF Mailbox
369 * Data registers before doing the write to the VF
370 * Mailbox Control register.
372 t4_read_reg(adap, data_reg);
375 CH_DUMP_MBOX(adap, mbox, data_reg);
377 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
378 t4_read_reg(adap, ctl_reg); /* flush write */
384 * Loop waiting for the reply; bail out if we time out or the firmware
388 for (i = 0; i < timeout; i += ms) {
389 if (!(adap->flags & IS_VF)) {
390 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
391 if (pcie_fw & F_PCIE_FW_ERR)
395 ms = delay[delay_idx]; /* last element may repeat */
396 if (delay_idx < ARRAY_SIZE(delay) - 1)
403 v = t4_read_reg(adap, ctl_reg);
404 if (v == X_CIM_PF_NOACCESS)
406 if (G_MBOWNER(v) == X_MBOWNER_PL) {
407 if (!(v & F_MBMSGVALID)) {
408 t4_write_reg(adap, ctl_reg,
409 V_MBOWNER(X_MBOWNER_NONE));
414 * Retrieve the command reply and release the mailbox.
416 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
417 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
419 CH_DUMP_MBOX(adap, mbox, data_reg);
421 res = be64_to_cpu(cmd_rpl[0]);
422 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
423 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
424 res = V_FW_CMD_RETVAL(EIO);
426 memcpy(rpl, cmd_rpl, size);
427 return -G_FW_CMD_RETVAL((int)res);
432 * We timed out waiting for a reply to our mailbox command. Report
433 * the error and also check to see if the firmware reported any
436 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
437 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
438 *(const u8 *)cmd, mbox);
440 /* If DUMP_MBOX is set the mbox has already been dumped */
441 if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
443 CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
444 "%016llx %016llx %016llx %016llx\n",
445 (unsigned long long)be64_to_cpu(p[0]),
446 (unsigned long long)be64_to_cpu(p[1]),
447 (unsigned long long)be64_to_cpu(p[2]),
448 (unsigned long long)be64_to_cpu(p[3]),
449 (unsigned long long)be64_to_cpu(p[4]),
450 (unsigned long long)be64_to_cpu(p[5]),
451 (unsigned long long)be64_to_cpu(p[6]),
452 (unsigned long long)be64_to_cpu(p[7]));
455 t4_report_fw_error(adap);
460 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
461 void *rpl, bool sleep_ok)
463 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
464 sleep_ok, FW_CMD_MAX_TIMEOUT);
468 static int t4_edc_err_read(struct adapter *adap, int idx)
470 u32 edc_ecc_err_addr_reg;
471 u32 edc_bist_status_rdata_reg;
474 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
477 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
478 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
482 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
483 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
486 "edc%d err addr 0x%x: 0x%x.\n",
487 idx, edc_ecc_err_addr_reg,
488 t4_read_reg(adap, edc_ecc_err_addr_reg));
490 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
491 edc_bist_status_rdata_reg,
492 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
493 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
494 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
495 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
496 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
497 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
498 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
499 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
500 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
506 * t4_mc_read - read from MC through backdoor accesses
508 * @idx: which MC to access
509 * @addr: address of first byte requested
510 * @data: 64 bytes of data containing the requested address
511 * @ecc: where to store the corresponding 64-bit ECC word
513 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
514 * that covers the requested address @addr. If @parity is not %NULL it
515 * is assigned the 64-bit ECC word for the read data.
517 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
520 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
521 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
524 mc_bist_cmd_reg = A_MC_BIST_CMD;
525 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
526 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
527 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
528 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
530 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
531 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
532 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
533 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
535 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
539 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
541 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
542 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
543 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
544 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
545 F_START_BIST | V_BIST_CMD_GAP(1));
546 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
550 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
552 for (i = 15; i >= 0; i--)
553 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
555 *ecc = t4_read_reg64(adap, MC_DATA(16));
561 * t4_edc_read - read from EDC through backdoor accesses
563 * @idx: which EDC to access
564 * @addr: address of first byte requested
565 * @data: 64 bytes of data containing the requested address
566 * @ecc: where to store the corresponding 64-bit ECC word
568 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
569 * that covers the requested address @addr. If @parity is not %NULL it
570 * is assigned the 64-bit ECC word for the read data.
572 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
575 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
576 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
579 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
580 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
581 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
582 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
584 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
588 * These macro are missing in t4_regs.h file.
589 * Added temporarily for testing.
591 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
592 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
593 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
594 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
595 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
596 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
598 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
604 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
606 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
607 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
608 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
609 t4_write_reg(adap, edc_bist_cmd_reg,
610 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
611 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
615 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
617 for (i = 15; i >= 0; i--)
618 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
620 *ecc = t4_read_reg64(adap, EDC_DATA(16));
626 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
628 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
629 * @addr: address within indicated memory type
630 * @len: amount of memory to read
631 * @buf: host memory buffer
633 * Reads an [almost] arbitrary memory region in the firmware: the
634 * firmware memory address, length and host buffer must be aligned on
635 * 32-bit boudaries. The memory is returned as a raw byte sequence from
636 * the firmware's memory. If this memory contains data structures which
637 * contain multi-byte integers, it's the callers responsibility to
638 * perform appropriate byte order conversions.
640 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
643 u32 pos, start, end, offset;
647 * Argument sanity checks ...
649 if ((addr & 0x3) || (len & 0x3))
653 * The underlaying EDC/MC read routines read 64 bytes at a time so we
654 * need to round down the start and round up the end. We'll start
655 * copying out of the first line at (addr - start) a word at a time.
657 start = rounddown2(addr, 64);
658 end = roundup2(addr + len, 64);
659 offset = (addr - start)/sizeof(__be32);
661 for (pos = start; pos < end; pos += 64, offset = 0) {
665 * Read the chip's memory block and bail if there's an error.
667 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
668 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
670 ret = t4_edc_read(adap, mtype, pos, data, NULL);
675 * Copy the data into the caller's memory buffer.
677 while (offset < 16 && len > 0) {
678 *buf++ = data[offset++];
679 len -= sizeof(__be32);
687 * Return the specified PCI-E Configuration Space register from our Physical
688 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
689 * since we prefer to let the firmware own all of these registers, but if that
690 * fails we go for it directly ourselves.
692 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
696 * If fw_attach != 0, construct and send the Firmware LDST Command to
697 * retrieve the specified PCI-E Configuration Space register.
699 if (drv_fw_attach != 0) {
700 struct fw_ldst_cmd ldst_cmd;
703 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
704 ldst_cmd.op_to_addrspace =
705 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
708 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
709 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
710 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
711 ldst_cmd.u.pcie.ctrl_to_fn =
712 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
713 ldst_cmd.u.pcie.r = reg;
716 * If the LDST Command succeeds, return the result, otherwise
717 * fall through to reading it directly ourselves ...
719 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
722 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
724 CH_WARN(adap, "Firmware failed to return "
725 "Configuration Space register %d, err = %d\n",
730 * Read the desired Configuration Space register via the PCI-E
731 * Backdoor mechanism.
733 return t4_hw_pci_read_cfg4(adap, reg);
737 * t4_get_regs_len - return the size of the chips register set
738 * @adapter: the adapter
740 * Returns the size of the chip's BAR0 register space.
742 unsigned int t4_get_regs_len(struct adapter *adapter)
744 unsigned int chip_version = chip_id(adapter);
746 switch (chip_version) {
748 if (adapter->flags & IS_VF)
749 return FW_T4VF_REGMAP_SIZE;
750 return T4_REGMAP_SIZE;
754 if (adapter->flags & IS_VF)
755 return FW_T4VF_REGMAP_SIZE;
756 return T5_REGMAP_SIZE;
760 "Unsupported chip version %d\n", chip_version);
765 * t4_get_regs - read chip registers into provided buffer
767 * @buf: register buffer
768 * @buf_size: size (in bytes) of register buffer
770 * If the provided register buffer isn't large enough for the chip's
771 * full register range, the register dump will be truncated to the
772 * register buffer's size.
774 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
776 static const unsigned int t4_reg_ranges[] = {
1235 static const unsigned int t4vf_reg_ranges[] = {
1236 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1237 VF_MPS_REG(A_MPS_VF_CTL),
1238 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1239 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1240 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1241 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1242 FW_T4VF_MBDATA_BASE_ADDR,
1243 FW_T4VF_MBDATA_BASE_ADDR +
1244 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1247 static const unsigned int t5_reg_ranges[] = {
2014 static const unsigned int t5vf_reg_ranges[] = {
2015 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2016 VF_MPS_REG(A_MPS_VF_CTL),
2017 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2018 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2019 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2020 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2021 FW_T4VF_MBDATA_BASE_ADDR,
2022 FW_T4VF_MBDATA_BASE_ADDR +
2023 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2026 static const unsigned int t6_reg_ranges[] = {
2587 static const unsigned int t6vf_reg_ranges[] = {
2588 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2589 VF_MPS_REG(A_MPS_VF_CTL),
2590 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2591 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2592 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2593 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2594 FW_T6VF_MBDATA_BASE_ADDR,
2595 FW_T6VF_MBDATA_BASE_ADDR +
2596 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2599 u32 *buf_end = (u32 *)(buf + buf_size);
2600 const unsigned int *reg_ranges;
2601 int reg_ranges_size, range;
2602 unsigned int chip_version = chip_id(adap);
2605 * Select the right set of register ranges to dump depending on the
2606 * adapter chip type.
2608 switch (chip_version) {
2610 if (adap->flags & IS_VF) {
2611 reg_ranges = t4vf_reg_ranges;
2612 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2614 reg_ranges = t4_reg_ranges;
2615 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2620 if (adap->flags & IS_VF) {
2621 reg_ranges = t5vf_reg_ranges;
2622 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2624 reg_ranges = t5_reg_ranges;
2625 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2630 if (adap->flags & IS_VF) {
2631 reg_ranges = t6vf_reg_ranges;
2632 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2634 reg_ranges = t6_reg_ranges;
2635 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2641 "Unsupported chip version %d\n", chip_version);
2646 * Clear the register buffer and insert the appropriate register
2647 * values selected by the above register ranges.
2649 memset(buf, 0, buf_size);
2650 for (range = 0; range < reg_ranges_size; range += 2) {
2651 unsigned int reg = reg_ranges[range];
2652 unsigned int last_reg = reg_ranges[range + 1];
2653 u32 *bufp = (u32 *)(buf + reg);
2656 * Iterate across the register range filling in the register
2657 * buffer but don't write past the end of the register buffer.
2659 while (reg <= last_reg && bufp < buf_end) {
2660 *bufp++ = t4_read_reg(adap, reg);
2667 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2679 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2681 #define EEPROM_DELAY 10 /* 10us per poll spin */
2682 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2684 #define EEPROM_STAT_ADDR 0x7bfc
2685 #define VPD_SIZE 0x800
2686 #define VPD_BASE 0x400
2687 #define VPD_BASE_OLD 0
2688 #define VPD_LEN 1024
2689 #define VPD_INFO_FLD_HDR_SIZE 3
2690 #define CHELSIO_VPD_UNIQUE_ID 0x82
2693 * Small utility function to wait till any outstanding VPD Access is complete.
2694 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2695 * VPD Access in flight. This allows us to handle the problem of having a
2696 * previous VPD Access time out and prevent an attempt to inject a new VPD
2697 * Request before any in-flight VPD reguest has completed.
2699 static int t4_seeprom_wait(struct adapter *adapter)
2701 unsigned int base = adapter->params.pci.vpd_cap_addr;
2705 * If no VPD Access is in flight, we can just return success right
2708 if (!adapter->vpd_busy)
2712 * Poll the VPD Capability Address/Flag register waiting for it
2713 * to indicate that the operation is complete.
2715 max_poll = EEPROM_MAX_POLL;
2719 udelay(EEPROM_DELAY);
2720 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2723 * If the operation is complete, mark the VPD as no longer
2724 * busy and return success.
2726 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2727 adapter->vpd_busy = 0;
2730 } while (--max_poll);
2733 * Failure! Note that we leave the VPD Busy status set in order to
2734 * avoid pushing a new VPD Access request into the VPD Capability till
2735 * the current operation eventually succeeds. It's a bug to issue a
2736 * new request when an existing request is in flight and will result
2737 * in corrupt hardware state.
2743 * t4_seeprom_read - read a serial EEPROM location
2744 * @adapter: adapter to read
2745 * @addr: EEPROM virtual address
2746 * @data: where to store the read data
2748 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2749 * VPD capability. Note that this function must be called with a virtual
2752 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2754 unsigned int base = adapter->params.pci.vpd_cap_addr;
2758 * VPD Accesses must alway be 4-byte aligned!
2760 if (addr >= EEPROMVSIZE || (addr & 3))
2764 * Wait for any previous operation which may still be in flight to
2767 ret = t4_seeprom_wait(adapter);
2769 CH_ERR(adapter, "VPD still busy from previous operation\n");
2774 * Issue our new VPD Read request, mark the VPD as being busy and wait
2775 * for our request to complete. If it doesn't complete, note the
2776 * error and return it to our caller. Note that we do not reset the
2779 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2780 adapter->vpd_busy = 1;
2781 adapter->vpd_flag = PCI_VPD_ADDR_F;
2782 ret = t4_seeprom_wait(adapter);
2784 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2789 * Grab the returned data, swizzle it into our endianness and
2792 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2793 *data = le32_to_cpu(*data);
2798 * t4_seeprom_write - write a serial EEPROM location
2799 * @adapter: adapter to write
2800 * @addr: virtual EEPROM address
2801 * @data: value to write
2803 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2804 * VPD capability. Note that this function must be called with a virtual
2807 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2809 unsigned int base = adapter->params.pci.vpd_cap_addr;
2815 * VPD Accesses must alway be 4-byte aligned!
2817 if (addr >= EEPROMVSIZE || (addr & 3))
2821 * Wait for any previous operation which may still be in flight to
2824 ret = t4_seeprom_wait(adapter);
2826 CH_ERR(adapter, "VPD still busy from previous operation\n");
2831 * Issue our new VPD Read request, mark the VPD as being busy and wait
2832 * for our request to complete. If it doesn't complete, note the
2833 * error and return it to our caller. Note that we do not reset the
2836 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2838 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2839 (u16)addr | PCI_VPD_ADDR_F);
2840 adapter->vpd_busy = 1;
2841 adapter->vpd_flag = 0;
2842 ret = t4_seeprom_wait(adapter);
2844 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2849 * Reset PCI_VPD_DATA register after a transaction and wait for our
2850 * request to complete. If it doesn't complete, return error.
2852 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2853 max_poll = EEPROM_MAX_POLL;
2855 udelay(EEPROM_DELAY);
2856 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2857 } while ((stats_reg & 0x1) && --max_poll);
2861 /* Return success! */
2866 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2867 * @phys_addr: the physical EEPROM address
2868 * @fn: the PCI function number
2869 * @sz: size of function-specific area
2871 * Translate a physical EEPROM address to virtual. The first 1K is
2872 * accessed through virtual addresses starting at 31K, the rest is
2873 * accessed through virtual addresses starting at 0.
2875 * The mapping is as follows:
2876 * [0..1K) -> [31K..32K)
2877 * [1K..1K+A) -> [ES-A..ES)
2878 * [1K+A..ES) -> [0..ES-A-1K)
2880 * where A = @fn * @sz, and ES = EEPROM size.
2882 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2885 if (phys_addr < 1024)
2886 return phys_addr + (31 << 10);
2887 if (phys_addr < 1024 + fn)
2888 return EEPROMSIZE - fn + phys_addr - 1024;
2889 if (phys_addr < EEPROMSIZE)
2890 return phys_addr - 1024 - fn;
2895 * t4_seeprom_wp - enable/disable EEPROM write protection
2896 * @adapter: the adapter
2897 * @enable: whether to enable or disable write protection
2899 * Enables or disables write protection on the serial EEPROM.
2901 int t4_seeprom_wp(struct adapter *adapter, int enable)
2903 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2907 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2908 * @v: Pointer to buffered vpd data structure
2909 * @kw: The keyword to search for
2911 * Returns the value of the information field keyword or
2912 * -ENOENT otherwise.
2914 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2917 unsigned int offset , len;
2918 const u8 *buf = (const u8 *)v;
2919 const u8 *vpdr_len = &v->vpdr_len[0];
2920 offset = sizeof(struct t4_vpd_hdr);
2921 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2923 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2927 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2928 if(memcmp(buf + i , kw , 2) == 0){
2929 i += VPD_INFO_FLD_HDR_SIZE;
2933 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2941 * get_vpd_params - read VPD parameters from VPD EEPROM
2942 * @adapter: adapter to read
2943 * @p: where to store the parameters
2944 * @vpd: caller provided temporary space to read the VPD into
2946 * Reads card parameters stored in VPD EEPROM.
2948 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2954 const struct t4_vpd_hdr *v;
2957 * Card information normally starts at VPD_BASE but early cards had
2960 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2965 * The VPD shall have a unique identifier specified by the PCI SIG.
2966 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2967 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2968 * is expected to automatically put this entry at the
2969 * beginning of the VPD.
2971 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2973 for (i = 0; i < VPD_LEN; i += 4) {
2974 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2978 v = (const struct t4_vpd_hdr *)vpd;
2980 #define FIND_VPD_KW(var,name) do { \
2981 var = get_vpd_keyword_val(v , name); \
2983 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2988 FIND_VPD_KW(i, "RV");
2989 for (csum = 0; i >= 0; i--)
2994 "corrupted VPD EEPROM, actual csum %u\n", csum);
2998 FIND_VPD_KW(ec, "EC");
2999 FIND_VPD_KW(sn, "SN");
3000 FIND_VPD_KW(pn, "PN");
3001 FIND_VPD_KW(na, "NA");
3004 memcpy(p->id, v->id_data, ID_LEN);
3006 memcpy(p->ec, vpd + ec, EC_LEN);
3008 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3009 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3011 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3012 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3013 strstrip((char *)p->pn);
3014 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3015 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3016 strstrip((char *)p->na);
3021 /* serial flash and firmware constants and flash config file constants */
3023 SF_ATTEMPTS = 10, /* max retries for SF operations */
3025 /* flash command opcodes */
3026 SF_PROG_PAGE = 2, /* program 256B page */
3027 SF_WR_DISABLE = 4, /* disable writes */
3028 SF_RD_STATUS = 5, /* read status register */
3029 SF_WR_ENABLE = 6, /* enable writes */
3030 SF_RD_DATA_FAST = 0xb, /* read flash */
3031 SF_RD_ID = 0x9f, /* read ID */
3032 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */
3036 * sf1_read - read data from the serial flash
3037 * @adapter: the adapter
3038 * @byte_cnt: number of bytes to read
3039 * @cont: whether another operation will be chained
3040 * @lock: whether to lock SF for PL access only
3041 * @valp: where to store the read data
3043 * Reads up to 4 bytes of data from the serial flash. The location of
3044 * the read needs to be specified prior to calling this by issuing the
3045 * appropriate commands to the serial flash.
3047 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3048 int lock, u32 *valp)
3052 if (!byte_cnt || byte_cnt > 4)
3054 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3056 t4_write_reg(adapter, A_SF_OP,
3057 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3058 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3060 *valp = t4_read_reg(adapter, A_SF_DATA);
3065 * sf1_write - write data to the serial flash
3066 * @adapter: the adapter
3067 * @byte_cnt: number of bytes to write
3068 * @cont: whether another operation will be chained
3069 * @lock: whether to lock SF for PL access only
3070 * @val: value to write
3072 * Writes up to 4 bytes of data to the serial flash. The location of
3073 * the write needs to be specified prior to calling this by issuing the
3074 * appropriate commands to the serial flash.
3076 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3079 if (!byte_cnt || byte_cnt > 4)
3081 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3083 t4_write_reg(adapter, A_SF_DATA, val);
3084 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3085 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3086 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3090 * flash_wait_op - wait for a flash operation to complete
3091 * @adapter: the adapter
3092 * @attempts: max number of polls of the status register
3093 * @delay: delay between polls in ms
3095 * Wait for a flash operation to complete by polling the status register.
3097 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3103 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3104 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3108 if (--attempts == 0)
3116 * t4_read_flash - read words from serial flash
3117 * @adapter: the adapter
3118 * @addr: the start address for the read
3119 * @nwords: how many 32-bit words to read
3120 * @data: where to store the read data
3121 * @byte_oriented: whether to store data as bytes or as words
3123 * Read the specified number of 32-bit words from the serial flash.
3124 * If @byte_oriented is set the read data is stored as a byte array
3125 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3126 * natural endianness.
3128 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3129 unsigned int nwords, u32 *data, int byte_oriented)
3133 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3136 addr = swab32(addr) | SF_RD_DATA_FAST;
3138 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3139 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3142 for ( ; nwords; nwords--, data++) {
3143 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3145 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3149 *data = (__force __u32)(cpu_to_be32(*data));
3155 * t4_write_flash - write up to a page of data to the serial flash
3156 * @adapter: the adapter
3157 * @addr: the start address to write
3158 * @n: length of data to write in bytes
3159 * @data: the data to write
3160 * @byte_oriented: whether to store data as bytes or as words
3162 * Writes up to a page of data (256 bytes) to the serial flash starting
3163 * at the given address. All the data must be written to the same page.
3164 * If @byte_oriented is set the write data is stored as byte stream
3165 * (i.e. matches what on disk), otherwise in big-endian.
3167 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3168 unsigned int n, const u8 *data, int byte_oriented)
3171 u32 buf[SF_PAGE_SIZE / 4];
3172 unsigned int i, c, left, val, offset = addr & 0xff;
3174 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3177 val = swab32(addr) | SF_PROG_PAGE;
3179 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3180 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3183 for (left = n; left; left -= c) {
3185 for (val = 0, i = 0; i < c; ++i)
3186 val = (val << 8) + *data++;
3189 val = cpu_to_be32(val);
3191 ret = sf1_write(adapter, c, c != left, 1, val);
3195 ret = flash_wait_op(adapter, 8, 1);
3199 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3201 /* Read the page to verify the write succeeded */
3202 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3207 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3209 "failed to correctly write the flash page at %#x\n",
3216 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3221 * t4_get_fw_version - read the firmware version
3222 * @adapter: the adapter
3223 * @vers: where to place the version
3225 * Reads the FW version from flash.
3227 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3229 return t4_read_flash(adapter, FLASH_FW_START +
3230 offsetof(struct fw_hdr, fw_ver), 1,
3235 * t4_get_bs_version - read the firmware bootstrap version
3236 * @adapter: the adapter
3237 * @vers: where to place the version
3239 * Reads the FW Bootstrap version from flash.
3241 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3243 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3244 offsetof(struct fw_hdr, fw_ver), 1,
3249 * t4_get_tp_version - read the TP microcode version
3250 * @adapter: the adapter
3251 * @vers: where to place the version
3253 * Reads the TP microcode version from flash.
3255 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3257 return t4_read_flash(adapter, FLASH_FW_START +
3258 offsetof(struct fw_hdr, tp_microcode_ver),
3263 * t4_get_exprom_version - return the Expansion ROM version (if any)
3264 * @adapter: the adapter
3265 * @vers: where to place the version
3267 * Reads the Expansion ROM header from FLASH and returns the version
3268 * number (if present) through the @vers return value pointer. We return
3269 * this in the Firmware Version Format since it's convenient. Return
3270 * 0 on success, -ENOENT if no Expansion ROM is present.
3272 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3274 struct exprom_header {
3275 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3276 unsigned char hdr_ver[4]; /* Expansion ROM version */
3278 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3282 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3283 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3288 hdr = (struct exprom_header *)exprom_header_buf;
3289 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3292 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3293 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3294 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3295 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3300 * t4_get_scfg_version - return the Serial Configuration version
3301 * @adapter: the adapter
3302 * @vers: where to place the version
3304 * Reads the Serial Configuration Version via the Firmware interface
3305 * (thus this can only be called once we're ready to issue Firmware
3306 * commands). The format of the Serial Configuration version is
3307 * adapter specific. Returns 0 on success, an error on failure.
3309 * Note that early versions of the Firmware didn't include the ability
3310 * to retrieve the Serial Configuration version, so we zero-out the
3311 * return-value parameter in that case to avoid leaving it with
3314 * Also note that the Firmware will return its cached copy of the Serial
3315 * Initialization Revision ID, not the actual Revision ID as written in
3316 * the Serial EEPROM. This is only an issue if a new VPD has been written
3317 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3318 * it's best to defer calling this routine till after a FW_RESET_CMD has
3319 * been issued if the Host Driver will be performing a full adapter
3322 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3327 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3328 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3329 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3330 1, &scfgrev_param, vers);
3337 * t4_get_vpd_version - return the VPD version
3338 * @adapter: the adapter
3339 * @vers: where to place the version
3341 * Reads the VPD via the Firmware interface (thus this can only be called
3342 * once we're ready to issue Firmware commands). The format of the
3343 * VPD version is adapter specific. Returns 0 on success, an error on
3346 * Note that early versions of the Firmware didn't include the ability
3347 * to retrieve the VPD version, so we zero-out the return-value parameter
3348 * in that case to avoid leaving it with garbage in it.
3350 * Also note that the Firmware will return its cached copy of the VPD
3351 * Revision ID, not the actual Revision ID as written in the Serial
3352 * EEPROM. This is only an issue if a new VPD has been written and the
3353 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3354 * to defer calling this routine till after a FW_RESET_CMD has been issued
3355 * if the Host Driver will be performing a full adapter initialization.
3357 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3362 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3363 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3364 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3365 1, &vpdrev_param, vers);
3372 * t4_get_version_info - extract various chip/firmware version information
3373 * @adapter: the adapter
3375 * Reads various chip/firmware version numbers and stores them into the
3376 * adapter Adapter Parameters structure. If any of the efforts fails
3377 * the first failure will be returned, but all of the version numbers
3380 int t4_get_version_info(struct adapter *adapter)
3384 #define FIRST_RET(__getvinfo) \
3386 int __ret = __getvinfo; \
3387 if (__ret && !ret) \
3391 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3392 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3393 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3394 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3395 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3396 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3404 * t4_flash_erase_sectors - erase a range of flash sectors
3405 * @adapter: the adapter
3406 * @start: the first sector to erase
3407 * @end: the last sector to erase
3409 * Erases the sectors in the given inclusive range.
3411 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3415 if (end >= adapter->params.sf_nsec)
3418 while (start <= end) {
3419 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3420 (ret = sf1_write(adapter, 4, 0, 1,
3421 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3422 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3424 "erase of flash sector %d failed, error %d\n",
3430 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3435 * t4_flash_cfg_addr - return the address of the flash configuration file
3436 * @adapter: the adapter
3438 * Return the address within the flash where the Firmware Configuration
3439 * File is stored, or an error if the device FLASH is too small to contain
3440 * a Firmware Configuration File.
3442 int t4_flash_cfg_addr(struct adapter *adapter)
3445 * If the device FLASH isn't large enough to hold a Firmware
3446 * Configuration File, return an error.
3448 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3451 return FLASH_CFG_START;
3455 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3456 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3457 * and emit an error message for mismatched firmware to save our caller the
3460 static int t4_fw_matches_chip(struct adapter *adap,
3461 const struct fw_hdr *hdr)
3464 * The expression below will return FALSE for any unsupported adapter
3465 * which will keep us "honest" in the future ...
3467 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3468 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3469 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3473 "FW image (%d) is not suitable for this adapter (%d)\n",
3474 hdr->chip, chip_id(adap));
3479 * t4_load_fw - download firmware
3480 * @adap: the adapter
3481 * @fw_data: the firmware image to write
3484 * Write the supplied firmware image to the card's serial flash.
3486 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3491 u8 first_page[SF_PAGE_SIZE];
3492 const u32 *p = (const u32 *)fw_data;
3493 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3494 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3495 unsigned int fw_start_sec;
3496 unsigned int fw_start;
3497 unsigned int fw_size;
3499 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3500 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3501 fw_start = FLASH_FWBOOTSTRAP_START;
3502 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3504 fw_start_sec = FLASH_FW_START_SEC;
3505 fw_start = FLASH_FW_START;
3506 fw_size = FLASH_FW_MAX_SIZE;
3510 CH_ERR(adap, "FW image has no data\n");
3515 "FW image size not multiple of 512 bytes\n");
3518 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3520 "FW image size differs from size in FW header\n");
3523 if (size > fw_size) {
3524 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3528 if (!t4_fw_matches_chip(adap, hdr))
3531 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3532 csum += be32_to_cpu(p[i]);
3534 if (csum != 0xffffffff) {
3536 "corrupted firmware image, checksum %#x\n", csum);
3540 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3541 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3546 * We write the correct version at the end so the driver can see a bad
3547 * version if the FW write fails. Start by writing a copy of the
3548 * first page with a bad version.
3550 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3551 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3552 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3557 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3558 addr += SF_PAGE_SIZE;
3559 fw_data += SF_PAGE_SIZE;
3560 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3565 ret = t4_write_flash(adap,
3566 fw_start + offsetof(struct fw_hdr, fw_ver),
3567 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3570 CH_ERR(adap, "firmware download failed, error %d\n",
3576 * t4_fwcache - firmware cache operation
3577 * @adap: the adapter
3578 * @op : the operation (flush or flush and invalidate)
3580 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3582 struct fw_params_cmd c;
3584 memset(&c, 0, sizeof(c));
3586 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3587 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3588 V_FW_PARAMS_CMD_PFN(adap->pf) |
3589 V_FW_PARAMS_CMD_VFN(0));
3590 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3592 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3593 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3594 c.param[0].val = (__force __be32)op;
3596 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3599 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3600 unsigned int *pif_req_wrptr,
3601 unsigned int *pif_rsp_wrptr)
3604 u32 cfg, val, req, rsp;
3606 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3607 if (cfg & F_LADBGEN)
3608 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3610 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3611 req = G_POLADBGWRPTR(val);
3612 rsp = G_PILADBGWRPTR(val);
3614 *pif_req_wrptr = req;
3616 *pif_rsp_wrptr = rsp;
3618 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3619 for (j = 0; j < 6; j++) {
3620 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3621 V_PILADBGRDPTR(rsp));
3622 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3623 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3627 req = (req + 2) & M_POLADBGRDPTR;
3628 rsp = (rsp + 2) & M_PILADBGRDPTR;
3630 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3633 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3638 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3639 if (cfg & F_LADBGEN)
3640 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3642 for (i = 0; i < CIM_MALA_SIZE; i++) {
3643 for (j = 0; j < 5; j++) {
3645 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3646 V_PILADBGRDPTR(idx));
3647 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3648 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3651 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3654 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3658 for (i = 0; i < 8; i++) {
3659 u32 *p = la_buf + i;
3661 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3662 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3663 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3664 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3665 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3670 * t4_link_l1cfg - apply link configuration to MAC/PHY
3671 * @phy: the PHY to setup
3672 * @mac: the MAC to setup
3673 * @lc: the requested link configuration
3675 * Set up a port's MAC and PHY according to a desired link configuration.
3676 * - If the PHY can auto-negotiate first decide what to advertise, then
3677 * enable/disable auto-negotiation as desired, and reset.
3678 * - If the PHY does not auto-negotiate just reset it.
3679 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3680 * otherwise do it later based on the outcome of auto-negotiation.
3682 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3683 struct link_config *lc)
3685 struct fw_port_cmd c;
3686 unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3687 unsigned int aneg, fc, fec, speed, rcap;
3690 if (lc->requested_fc & PAUSE_RX)
3691 fc |= FW_PORT_CAP_FC_RX;
3692 if (lc->requested_fc & PAUSE_TX)
3693 fc |= FW_PORT_CAP_FC_TX;
3696 if (lc->requested_fec & FEC_RS)
3697 fec = FW_PORT_CAP_FEC_RS;
3698 else if (lc->requested_fec & FEC_BASER_RS)
3699 fec = FW_PORT_CAP_FEC_BASER_RS;
3700 else if (lc->requested_fec & FEC_RESERVED)
3701 fec = FW_PORT_CAP_FEC_RESERVED;
3703 if (!(lc->supported & FW_PORT_CAP_ANEG) ||
3704 lc->requested_aneg == AUTONEG_DISABLE) {
3706 switch (lc->requested_speed) {
3708 speed = FW_PORT_CAP_SPEED_100G;
3711 speed = FW_PORT_CAP_SPEED_40G;
3714 speed = FW_PORT_CAP_SPEED_25G;
3717 speed = FW_PORT_CAP_SPEED_10G;
3720 speed = FW_PORT_CAP_SPEED_1G;
3727 aneg = FW_PORT_CAP_ANEG;
3728 speed = lc->supported &
3729 V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED);
3732 rcap = aneg | speed | fc | fec;
3733 if ((rcap | lc->supported) != lc->supported) {
3734 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3736 rcap &= lc->supported;
3740 memset(&c, 0, sizeof(c));
3741 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3742 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3743 V_FW_PORT_CMD_PORTID(port));
3745 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3747 c.u.l1cfg.rcap = cpu_to_be32(rcap);
3749 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3753 * t4_restart_aneg - restart autonegotiation
3754 * @adap: the adapter
3755 * @mbox: mbox to use for the FW command
3756 * @port: the port id
3758 * Restarts autonegotiation for the selected port.
3760 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3762 struct fw_port_cmd c;
3764 memset(&c, 0, sizeof(c));
3765 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3766 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3767 V_FW_PORT_CMD_PORTID(port));
3769 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3771 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3772 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3775 typedef void (*int_handler_t)(struct adapter *adap);
3778 unsigned int mask; /* bits to check in interrupt status */
3779 const char *msg; /* message to print or NULL */
3780 short stat_idx; /* stat counter to increment or -1 */
3781 unsigned short fatal; /* whether the condition reported is fatal */
3782 int_handler_t int_handler; /* platform-specific int handler */
3786 * t4_handle_intr_status - table driven interrupt handler
3787 * @adapter: the adapter that generated the interrupt
3788 * @reg: the interrupt status register to process
3789 * @acts: table of interrupt actions
3791 * A table driven interrupt handler that applies a set of masks to an
3792 * interrupt status word and performs the corresponding actions if the
3793 * interrupts described by the mask have occurred. The actions include
3794 * optionally emitting a warning or alert message. The table is terminated
3795 * by an entry specifying mask 0. Returns the number of fatal interrupt
3798 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3799 const struct intr_info *acts)
3802 unsigned int mask = 0;
3803 unsigned int status = t4_read_reg(adapter, reg);
3805 for ( ; acts->mask; ++acts) {
3806 if (!(status & acts->mask))
3810 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3811 status & acts->mask);
3812 } else if (acts->msg)
3813 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3814 status & acts->mask);
3815 if (acts->int_handler)
3816 acts->int_handler(adapter);
3820 if (status) /* clear processed interrupts */
3821 t4_write_reg(adapter, reg, status);
3826 * Interrupt handler for the PCIE module.
3828 static void pcie_intr_handler(struct adapter *adapter)
3830 static const struct intr_info sysbus_intr_info[] = {
3831 { F_RNPP, "RXNP array parity error", -1, 1 },
3832 { F_RPCP, "RXPC array parity error", -1, 1 },
3833 { F_RCIP, "RXCIF array parity error", -1, 1 },
3834 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3835 { F_RFTP, "RXFT array parity error", -1, 1 },
3838 static const struct intr_info pcie_port_intr_info[] = {
3839 { F_TPCP, "TXPC array parity error", -1, 1 },
3840 { F_TNPP, "TXNP array parity error", -1, 1 },
3841 { F_TFTP, "TXFT array parity error", -1, 1 },
3842 { F_TCAP, "TXCA array parity error", -1, 1 },
3843 { F_TCIP, "TXCIF array parity error", -1, 1 },
3844 { F_RCAP, "RXCA array parity error", -1, 1 },
3845 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3846 { F_RDPE, "Rx data parity error", -1, 1 },
3847 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
3850 static const struct intr_info pcie_intr_info[] = {
3851 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3852 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3853 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3854 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3855 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3856 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3857 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3858 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3859 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3860 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3861 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3862 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3863 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3864 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3865 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3866 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3867 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3868 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3869 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3870 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3871 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3872 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3873 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3874 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3875 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3876 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3877 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3878 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3879 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3880 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3885 static const struct intr_info t5_pcie_intr_info[] = {
3886 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3888 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3889 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3890 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3891 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3892 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3893 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3894 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3896 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3898 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3899 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3900 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3901 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3902 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3904 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3905 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3906 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3907 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3908 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3909 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3910 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3911 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3912 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3913 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3914 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3916 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3918 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3919 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3920 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3921 { F_READRSPERR, "Outbound read error", -1,
3929 fat = t4_handle_intr_status(adapter,
3930 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3932 t4_handle_intr_status(adapter,
3933 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3934 pcie_port_intr_info) +
3935 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3938 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3941 t4_fatal_err(adapter);
3945 * TP interrupt handler.
3947 static void tp_intr_handler(struct adapter *adapter)
3949 static const struct intr_info tp_intr_info[] = {
3950 { 0x3fffffff, "TP parity error", -1, 1 },
3951 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3955 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3956 t4_fatal_err(adapter);
3960 * SGE interrupt handler.
3962 static void sge_intr_handler(struct adapter *adapter)
3967 static const struct intr_info sge_intr_info[] = {
3968 { F_ERR_CPL_EXCEED_IQE_SIZE,
3969 "SGE received CPL exceeding IQE size", -1, 1 },
3970 { F_ERR_INVALID_CIDX_INC,
3971 "SGE GTS CIDX increment too large", -1, 0 },
3972 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3973 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3974 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3975 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3976 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3978 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3980 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3982 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3984 { F_ERR_ING_CTXT_PRIO,
3985 "SGE too many priority ingress contexts", -1, 0 },
3986 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3987 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3988 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
3989 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
3990 "SGE PCIe error for a DBP thread", -1, 0 },
3994 static const struct intr_info t4t5_sge_intr_info[] = {
3995 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3996 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3997 { F_ERR_EGR_CTXT_PRIO,
3998 "SGE too many priority egress contexts", -1, 0 },
4003 * For now, treat below interrupts as fatal so that we disable SGE and
4004 * get better debug */
4005 static const struct intr_info t6_sge_intr_info[] = {
4007 "SGE Actual WRE packet is less than advertized length",
4012 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4013 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4015 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4016 (unsigned long long)v);
4017 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4018 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
4021 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4022 if (chip_id(adapter) <= CHELSIO_T5)
4023 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4024 t4t5_sge_intr_info);
4026 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4029 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4030 if (err & F_ERROR_QID_VALID) {
4031 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4032 if (err & F_UNCAPTURED_ERROR)
4033 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4034 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4035 F_UNCAPTURED_ERROR);
4039 t4_fatal_err(adapter);
4042 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4043 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4044 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4045 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4048 * CIM interrupt handler.
4050 static void cim_intr_handler(struct adapter *adapter)
4052 static const struct intr_info cim_intr_info[] = {
4053 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4054 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4055 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4056 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4057 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4058 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4059 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4060 { F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
4063 static const struct intr_info cim_upintr_info[] = {
4064 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4065 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4066 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4067 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4068 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4069 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4070 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4071 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4072 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4073 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4074 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4075 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4076 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4077 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4078 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4079 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4080 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4081 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4082 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4083 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4084 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4085 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4086 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4087 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4088 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4089 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4090 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4091 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4097 fw_err = t4_read_reg(adapter, A_PCIE_FW);
4098 if (fw_err & F_PCIE_FW_ERR)
4099 t4_report_fw_error(adapter);
4101 /* When the Firmware detects an internal error which normally wouldn't
4102 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4103 * to make sure the Host sees the Firmware Crash. So if we have a
4104 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4107 val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
4108 if (val & F_TIMER0INT)
4109 if (!(fw_err & F_PCIE_FW_ERR) ||
4110 (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
4111 t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
4114 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4116 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4119 t4_fatal_err(adapter);
4123 * ULP RX interrupt handler.
4125 static void ulprx_intr_handler(struct adapter *adapter)
4127 static const struct intr_info ulprx_intr_info[] = {
4128 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4129 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4130 { 0x7fffff, "ULPRX parity error", -1, 1 },
4134 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4135 t4_fatal_err(adapter);
4139 * ULP TX interrupt handler.
4141 static void ulptx_intr_handler(struct adapter *adapter)
4143 static const struct intr_info ulptx_intr_info[] = {
4144 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4146 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4148 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4150 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4152 { 0xfffffff, "ULPTX parity error", -1, 1 },
4156 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4157 t4_fatal_err(adapter);
4161 * PM TX interrupt handler.
4163 static void pmtx_intr_handler(struct adapter *adapter)
4165 static const struct intr_info pmtx_intr_info[] = {
4166 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4167 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4168 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4169 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4170 { 0xffffff0, "PMTX framing error", -1, 1 },
4171 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4172 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4174 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4175 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4179 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4180 t4_fatal_err(adapter);
4184 * PM RX interrupt handler.
4186 static void pmrx_intr_handler(struct adapter *adapter)
4188 static const struct intr_info pmrx_intr_info[] = {
4189 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4190 { 0x3ffff0, "PMRX framing error", -1, 1 },
4191 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4192 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4194 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4195 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4199 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4200 t4_fatal_err(adapter);
4204 * CPL switch interrupt handler.
4206 static void cplsw_intr_handler(struct adapter *adapter)
4208 static const struct intr_info cplsw_intr_info[] = {
4209 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4210 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4211 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4212 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4213 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4214 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4218 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4219 t4_fatal_err(adapter);
4223 * LE interrupt handler.
4225 static void le_intr_handler(struct adapter *adap)
4227 unsigned int chip_ver = chip_id(adap);
4228 static const struct intr_info le_intr_info[] = {
4229 { F_LIPMISS, "LE LIP miss", -1, 0 },
4230 { F_LIP0, "LE 0 LIP error", -1, 0 },
4231 { F_PARITYERR, "LE parity error", -1, 1 },
4232 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4233 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4237 static const struct intr_info t6_le_intr_info[] = {
4238 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4239 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4240 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4241 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4242 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4246 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4247 (chip_ver <= CHELSIO_T5) ?
4248 le_intr_info : t6_le_intr_info))
4253 * MPS interrupt handler.
4255 static void mps_intr_handler(struct adapter *adapter)
4257 static const struct intr_info mps_rx_intr_info[] = {
4258 { 0xffffff, "MPS Rx parity error", -1, 1 },
4261 static const struct intr_info mps_tx_intr_info[] = {
4262 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4263 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4264 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4266 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4268 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4269 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4270 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4273 static const struct intr_info mps_trc_intr_info[] = {
4274 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4275 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4277 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4280 static const struct intr_info mps_stat_sram_intr_info[] = {
4281 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4284 static const struct intr_info mps_stat_tx_intr_info[] = {
4285 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4288 static const struct intr_info mps_stat_rx_intr_info[] = {
4289 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4292 static const struct intr_info mps_cls_intr_info[] = {
4293 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4294 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4295 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4301 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4303 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4305 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4306 mps_trc_intr_info) +
4307 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4308 mps_stat_sram_intr_info) +
4309 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4310 mps_stat_tx_intr_info) +
4311 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4312 mps_stat_rx_intr_info) +
4313 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4316 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4317 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4319 t4_fatal_err(adapter);
4322 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4326 * EDC/MC interrupt handler.
4328 static void mem_intr_handler(struct adapter *adapter, int idx)
4330 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4332 unsigned int addr, cnt_addr, v;
4334 if (idx <= MEM_EDC1) {
4335 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4336 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4337 } else if (idx == MEM_MC) {
4338 if (is_t4(adapter)) {
4339 addr = A_MC_INT_CAUSE;
4340 cnt_addr = A_MC_ECC_STATUS;
4342 addr = A_MC_P_INT_CAUSE;
4343 cnt_addr = A_MC_P_ECC_STATUS;
4346 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4347 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4350 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4351 if (v & F_PERR_INT_CAUSE)
4352 CH_ALERT(adapter, "%s FIFO parity error\n",
4354 if (v & F_ECC_CE_INT_CAUSE) {
4355 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4357 if (idx <= MEM_EDC1)
4358 t4_edc_err_read(adapter, idx);
4360 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4361 CH_WARN_RATELIMIT(adapter,
4362 "%u %s correctable ECC data error%s\n",
4363 cnt, name[idx], cnt > 1 ? "s" : "");
4365 if (v & F_ECC_UE_INT_CAUSE)
4367 "%s uncorrectable ECC data error\n", name[idx]);
4369 t4_write_reg(adapter, addr, v);
4370 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4371 t4_fatal_err(adapter);
4375 * MA interrupt handler.
4377 static void ma_intr_handler(struct adapter *adapter)
4379 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4381 if (status & F_MEM_PERR_INT_CAUSE) {
4383 "MA parity error, parity status %#x\n",
4384 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4387 "MA parity error, parity status %#x\n",
4388 t4_read_reg(adapter,
4389 A_MA_PARITY_ERROR_STATUS2));
4391 if (status & F_MEM_WRAP_INT_CAUSE) {
4392 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4393 CH_ALERT(adapter, "MA address wrap-around error by "
4394 "client %u to address %#x\n",
4395 G_MEM_WRAP_CLIENT_NUM(v),
4396 G_MEM_WRAP_ADDRESS(v) << 4);
4398 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4399 t4_fatal_err(adapter);
4403 * SMB interrupt handler.
4405 static void smb_intr_handler(struct adapter *adap)
4407 static const struct intr_info smb_intr_info[] = {
4408 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4409 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4410 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4414 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4419 * NC-SI interrupt handler.
4421 static void ncsi_intr_handler(struct adapter *adap)
4423 static const struct intr_info ncsi_intr_info[] = {
4424 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4425 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4426 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4427 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4431 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4436 * XGMAC interrupt handler.
4438 static void xgmac_intr_handler(struct adapter *adap, int port)
4440 u32 v, int_cause_reg;
4443 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4445 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4447 v = t4_read_reg(adap, int_cause_reg);
4449 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4453 if (v & F_TXFIFO_PRTY_ERR)
4454 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4456 if (v & F_RXFIFO_PRTY_ERR)
4457 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4459 t4_write_reg(adap, int_cause_reg, v);
4464 * PL interrupt handler.
4466 static void pl_intr_handler(struct adapter *adap)
4468 static const struct intr_info pl_intr_info[] = {
4469 { F_FATALPERR, "Fatal parity error", -1, 1 },
4470 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4474 static const struct intr_info t5_pl_intr_info[] = {
4475 { F_FATALPERR, "Fatal parity error", -1, 1 },
4479 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4481 pl_intr_info : t5_pl_intr_info))
4485 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4488 * t4_slow_intr_handler - control path interrupt handler
4489 * @adapter: the adapter
4491 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4492 * The designation 'slow' is because it involves register reads, while
4493 * data interrupts typically don't involve any MMIOs.
4495 int t4_slow_intr_handler(struct adapter *adapter)
4497 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4499 if (!(cause & GLBL_INTR_MASK))
4502 cim_intr_handler(adapter);
4504 mps_intr_handler(adapter);
4506 ncsi_intr_handler(adapter);
4508 pl_intr_handler(adapter);
4510 smb_intr_handler(adapter);
4512 xgmac_intr_handler(adapter, 0);
4514 xgmac_intr_handler(adapter, 1);
4516 xgmac_intr_handler(adapter, 2);
4518 xgmac_intr_handler(adapter, 3);
4520 pcie_intr_handler(adapter);
4522 mem_intr_handler(adapter, MEM_MC);
4523 if (is_t5(adapter) && (cause & F_MC1))
4524 mem_intr_handler(adapter, MEM_MC1);
4526 mem_intr_handler(adapter, MEM_EDC0);
4528 mem_intr_handler(adapter, MEM_EDC1);
4530 le_intr_handler(adapter);
4532 tp_intr_handler(adapter);
4534 ma_intr_handler(adapter);
4535 if (cause & F_PM_TX)
4536 pmtx_intr_handler(adapter);
4537 if (cause & F_PM_RX)
4538 pmrx_intr_handler(adapter);
4539 if (cause & F_ULP_RX)
4540 ulprx_intr_handler(adapter);
4541 if (cause & F_CPL_SWITCH)
4542 cplsw_intr_handler(adapter);
4544 sge_intr_handler(adapter);
4545 if (cause & F_ULP_TX)
4546 ulptx_intr_handler(adapter);
4548 /* Clear the interrupts just processed for which we are the master. */
4549 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4550 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4555 * t4_intr_enable - enable interrupts
4556 * @adapter: the adapter whose interrupts should be enabled
4558 * Enable PF-specific interrupts for the calling function and the top-level
4559 * interrupt concentrator for global interrupts. Interrupts are already
4560 * enabled at each module, here we just enable the roots of the interrupt
4563 * Note: this function should be called only when the driver manages
4564 * non PF-specific interrupts from the various HW modules. Only one PCI
4565 * function at a time should be doing this.
4567 void t4_intr_enable(struct adapter *adapter)
4570 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4571 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4572 ? G_SOURCEPF(whoami)
4573 : G_T6_SOURCEPF(whoami));
4575 if (chip_id(adapter) <= CHELSIO_T5)
4576 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4578 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4579 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4580 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4581 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4582 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4583 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4584 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4585 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4586 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4587 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4591 * t4_intr_disable - disable interrupts
4592 * @adapter: the adapter whose interrupts should be disabled
4594 * Disable interrupts. We only disable the top-level interrupt
4595 * concentrators. The caller must be a PCI function managing global
4598 void t4_intr_disable(struct adapter *adapter)
4600 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4601 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4602 ? G_SOURCEPF(whoami)
4603 : G_T6_SOURCEPF(whoami));
4605 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4606 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4610 * t4_intr_clear - clear all interrupts
4611 * @adapter: the adapter whose interrupts should be cleared
4613 * Clears all interrupts. The caller must be a PCI function managing
4614 * global interrupts.
4616 void t4_intr_clear(struct adapter *adapter)
4618 static const unsigned int cause_reg[] = {
4619 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4620 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4621 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4622 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4623 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4624 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4626 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4627 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4628 A_MPS_RX_PERR_INT_CAUSE,
4630 MYPF_REG(A_PL_PF_INT_CAUSE),
4637 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4638 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4640 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4641 A_MC_P_INT_CAUSE, 0xffffffff);
4643 if (is_t4(adapter)) {
4644 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4646 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4649 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4651 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4652 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4656 * hash_mac_addr - return the hash value of a MAC address
4657 * @addr: the 48-bit Ethernet MAC address
4659 * Hashes a MAC address according to the hash function used by HW inexact
4660 * (hash) address matching.
4662 static int hash_mac_addr(const u8 *addr)
4664 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4665 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4673 * t4_config_rss_range - configure a portion of the RSS mapping table
4674 * @adapter: the adapter
4675 * @mbox: mbox to use for the FW command
4676 * @viid: virtual interface whose RSS subtable is to be written
4677 * @start: start entry in the table to write
4678 * @n: how many table entries to write
4679 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4680 * @nrspq: number of values in @rspq
4682 * Programs the selected part of the VI's RSS mapping table with the
4683 * provided values. If @nrspq < @n the supplied values are used repeatedly
4684 * until the full table range is populated.
4686 * The caller must ensure the values in @rspq are in the range allowed for
4689 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4690 int start, int n, const u16 *rspq, unsigned int nrspq)
4693 const u16 *rsp = rspq;
4694 const u16 *rsp_end = rspq + nrspq;
4695 struct fw_rss_ind_tbl_cmd cmd;
4697 memset(&cmd, 0, sizeof(cmd));
4698 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4699 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4700 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4701 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4704 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4705 * Queue Identifiers. These Ingress Queue IDs are packed three to
4706 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4710 int nq = min(n, 32);
4712 __be32 *qp = &cmd.iq0_to_iq2;
4715 * Set up the firmware RSS command header to send the next
4716 * "nq" Ingress Queue IDs to the firmware.
4718 cmd.niqid = cpu_to_be16(nq);
4719 cmd.startidx = cpu_to_be16(start);
4722 * "nq" more done for the start of the next loop.
4728 * While there are still Ingress Queue IDs to stuff into the
4729 * current firmware RSS command, retrieve them from the
4730 * Ingress Queue ID array and insert them into the command.
4734 * Grab up to the next 3 Ingress Queue IDs (wrapping
4735 * around the Ingress Queue ID array if necessary) and
4736 * insert them into the firmware RSS command at the
4737 * current 3-tuple position within the commad.
4741 int nqbuf = min(3, nq);
4744 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4745 while (nqbuf && nq_packed < 32) {
4752 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4753 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4754 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4758 * Send this portion of the RRS table update to the firmware;
4759 * bail out on any errors.
4761 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4769 * t4_config_glbl_rss - configure the global RSS mode
4770 * @adapter: the adapter
4771 * @mbox: mbox to use for the FW command
4772 * @mode: global RSS mode
4773 * @flags: mode-specific flags
4775 * Sets the global RSS mode.
4777 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4780 struct fw_rss_glb_config_cmd c;
4782 memset(&c, 0, sizeof(c));
4783 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4784 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4785 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4786 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4787 c.u.manual.mode_pkd =
4788 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4789 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4790 c.u.basicvirtual.mode_keymode =
4791 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4792 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4795 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4799 * t4_config_vi_rss - configure per VI RSS settings
4800 * @adapter: the adapter
4801 * @mbox: mbox to use for the FW command
4804 * @defq: id of the default RSS queue for the VI.
4805 * @skeyidx: RSS secret key table index for non-global mode
4806 * @skey: RSS vf_scramble key for VI.
4808 * Configures VI-specific RSS properties.
4810 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4811 unsigned int flags, unsigned int defq, unsigned int skeyidx,
4814 struct fw_rss_vi_config_cmd c;
4816 memset(&c, 0, sizeof(c));
4817 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4818 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4819 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4820 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4821 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4822 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4823 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4824 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4825 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4827 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4830 /* Read an RSS table row */
4831 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4833 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4834 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4839 * t4_read_rss - read the contents of the RSS mapping table
4840 * @adapter: the adapter
4841 * @map: holds the contents of the RSS mapping table
4843 * Reads the contents of the RSS hash->queue mapping table.
4845 int t4_read_rss(struct adapter *adapter, u16 *map)
4850 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4851 ret = rd_rss_row(adapter, i, &val);
4854 *map++ = G_LKPTBLQUEUE0(val);
4855 *map++ = G_LKPTBLQUEUE1(val);
4861 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
4862 * @adap: the adapter
4863 * @cmd: TP fw ldst address space type
4864 * @vals: where the indirect register values are stored/written
4865 * @nregs: how many indirect registers to read/write
4866 * @start_idx: index of first indirect register to read/write
4867 * @rw: Read (1) or Write (0)
4868 * @sleep_ok: if true we may sleep while awaiting command completion
4870 * Access TP indirect registers through LDST
4872 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
4873 unsigned int nregs, unsigned int start_index,
4874 unsigned int rw, bool sleep_ok)
4878 struct fw_ldst_cmd c;
4880 for (i = 0; i < nregs; i++) {
4881 memset(&c, 0, sizeof(c));
4882 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4884 (rw ? F_FW_CMD_READ :
4886 V_FW_LDST_CMD_ADDRSPACE(cmd));
4887 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4889 c.u.addrval.addr = cpu_to_be32(start_index + i);
4890 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4891 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
4897 vals[i] = be32_to_cpu(c.u.addrval.val);
4903 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
4904 * @adap: the adapter
4905 * @reg_addr: Address Register
4906 * @reg_data: Data register
4907 * @buff: where the indirect register values are stored/written
4908 * @nregs: how many indirect registers to read/write
4909 * @start_index: index of first indirect register to read/write
4910 * @rw: READ(1) or WRITE(0)
4911 * @sleep_ok: if true we may sleep while awaiting command completion
4913 * Read/Write TP indirect registers through LDST if possible.
4914 * Else, use backdoor access
4916 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
4917 u32 *buff, u32 nregs, u32 start_index, int rw,
4925 cmd = FW_LDST_ADDRSPC_TP_PIO;
4927 case A_TP_TM_PIO_ADDR:
4928 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
4930 case A_TP_MIB_INDEX:
4931 cmd = FW_LDST_ADDRSPC_TP_MIB;
4934 goto indirect_access;
4937 if (t4_use_ldst(adap))
4938 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
4945 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
4948 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
4954 * t4_tp_pio_read - Read TP PIO registers
4955 * @adap: the adapter
4956 * @buff: where the indirect register values are written
4957 * @nregs: how many indirect registers to read
4958 * @start_index: index of first indirect register to read
4959 * @sleep_ok: if true we may sleep while awaiting command completion
4961 * Read TP PIO Registers
4963 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4964 u32 start_index, bool sleep_ok)
4966 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
4967 start_index, 1, sleep_ok);
4971 * t4_tp_pio_write - Write TP PIO registers
4972 * @adap: the adapter
4973 * @buff: where the indirect register values are stored
4974 * @nregs: how many indirect registers to write
4975 * @start_index: index of first indirect register to write
4976 * @sleep_ok: if true we may sleep while awaiting command completion
4978 * Write TP PIO Registers
4980 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
4981 u32 start_index, bool sleep_ok)
4983 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4984 __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
4988 * t4_tp_tm_pio_read - Read TP TM PIO registers
4989 * @adap: the adapter
4990 * @buff: where the indirect register values are written
4991 * @nregs: how many indirect registers to read
4992 * @start_index: index of first indirect register to read
4993 * @sleep_ok: if true we may sleep while awaiting command completion
4995 * Read TP TM PIO Registers
4997 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4998 u32 start_index, bool sleep_ok)
5000 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5001 nregs, start_index, 1, sleep_ok);
5005 * t4_tp_mib_read - Read TP MIB registers
5006 * @adap: the adapter
5007 * @buff: where the indirect register values are written
5008 * @nregs: how many indirect registers to read
5009 * @start_index: index of first indirect register to read
5010 * @sleep_ok: if true we may sleep while awaiting command completion
5012 * Read TP MIB Registers
5014 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5017 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5018 start_index, 1, sleep_ok);
5022 * t4_read_rss_key - read the global RSS key
5023 * @adap: the adapter
5024 * @key: 10-entry array holding the 320-bit RSS key
5025 * @sleep_ok: if true we may sleep while awaiting command completion
5027 * Reads the global 320-bit RSS key.
5029 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5031 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5035 * t4_write_rss_key - program one of the RSS keys
5036 * @adap: the adapter
5037 * @key: 10-entry array holding the 320-bit RSS key
5038 * @idx: which RSS key to write
5039 * @sleep_ok: if true we may sleep while awaiting command completion
5041 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5042 * 0..15 the corresponding entry in the RSS key table is written,
5043 * otherwise the global RSS key is written.
5045 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5048 u8 rss_key_addr_cnt = 16;
5049 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5052 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5053 * allows access to key addresses 16-63 by using KeyWrAddrX
5054 * as index[5:4](upper 2) into key table
5056 if ((chip_id(adap) > CHELSIO_T5) &&
5057 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5058 rss_key_addr_cnt = 32;
5060 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5062 if (idx >= 0 && idx < rss_key_addr_cnt) {
5063 if (rss_key_addr_cnt > 16)
5064 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5065 vrt | V_KEYWRADDRX(idx >> 4) |
5066 V_T6_VFWRADDR(idx) | F_KEYWREN);
5068 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5069 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5074 * t4_read_rss_pf_config - read PF RSS Configuration Table
5075 * @adapter: the adapter
5076 * @index: the entry in the PF RSS table to read
5077 * @valp: where to store the returned value
5078 * @sleep_ok: if true we may sleep while awaiting command completion
5080 * Reads the PF RSS Configuration Table at the specified index and returns
5081 * the value found there.
5083 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5084 u32 *valp, bool sleep_ok)
5086 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5090 * t4_write_rss_pf_config - write PF RSS Configuration Table
5091 * @adapter: the adapter
5092 * @index: the entry in the VF RSS table to read
5093 * @val: the value to store
5094 * @sleep_ok: if true we may sleep while awaiting command completion
5096 * Writes the PF RSS Configuration Table at the specified index with the
5099 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5100 u32 val, bool sleep_ok)
5102 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5107 * t4_read_rss_vf_config - read VF RSS Configuration Table
5108 * @adapter: the adapter
5109 * @index: the entry in the VF RSS table to read
5110 * @vfl: where to store the returned VFL
5111 * @vfh: where to store the returned VFH
5112 * @sleep_ok: if true we may sleep while awaiting command completion
5114 * Reads the VF RSS Configuration Table at the specified index and returns
5115 * the (VFL, VFH) values found there.
5117 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5118 u32 *vfl, u32 *vfh, bool sleep_ok)
5120 u32 vrt, mask, data;
5122 if (chip_id(adapter) <= CHELSIO_T5) {
5123 mask = V_VFWRADDR(M_VFWRADDR);
5124 data = V_VFWRADDR(index);
5126 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5127 data = V_T6_VFWRADDR(index);
5130 * Request that the index'th VF Table values be read into VFL/VFH.
5132 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5133 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5134 vrt |= data | F_VFRDEN;
5135 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5138 * Grab the VFL/VFH values ...
5140 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5141 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5145 * t4_write_rss_vf_config - write VF RSS Configuration Table
5147 * @adapter: the adapter
5148 * @index: the entry in the VF RSS table to write
5149 * @vfl: the VFL to store
5150 * @vfh: the VFH to store
5152 * Writes the VF RSS Configuration Table at the specified index with the
5153 * specified (VFL, VFH) values.
5155 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5156 u32 vfl, u32 vfh, bool sleep_ok)
5158 u32 vrt, mask, data;
5160 if (chip_id(adapter) <= CHELSIO_T5) {
5161 mask = V_VFWRADDR(M_VFWRADDR);
5162 data = V_VFWRADDR(index);
5164 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5165 data = V_T6_VFWRADDR(index);
5169 * Load up VFL/VFH with the values to be written ...
5171 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5172 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5175 * Write the VFL/VFH into the VF Table at index'th location.
5177 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5178 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5179 vrt |= data | F_VFRDEN;
5180 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5184 * t4_read_rss_pf_map - read PF RSS Map
5185 * @adapter: the adapter
5186 * @sleep_ok: if true we may sleep while awaiting command completion
5188 * Reads the PF RSS Map register and returns its value.
5190 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5194 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5200 * t4_write_rss_pf_map - write PF RSS Map
5201 * @adapter: the adapter
5202 * @pfmap: PF RSS Map value
5204 * Writes the specified value to the PF RSS Map register.
5206 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5208 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5212 * t4_read_rss_pf_mask - read PF RSS Mask
5213 * @adapter: the adapter
5214 * @sleep_ok: if true we may sleep while awaiting command completion
5216 * Reads the PF RSS Mask register and returns its value.
5218 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5222 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5228 * t4_write_rss_pf_mask - write PF RSS Mask
5229 * @adapter: the adapter
5230 * @pfmask: PF RSS Mask value
5232 * Writes the specified value to the PF RSS Mask register.
5234 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
5236 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5240 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5241 * @adap: the adapter
5242 * @v4: holds the TCP/IP counter values
5243 * @v6: holds the TCP/IPv6 counter values
5244 * @sleep_ok: if true we may sleep while awaiting command completion
5246 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5247 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5249 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5250 struct tp_tcp_stats *v6, bool sleep_ok)
5252 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5254 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5255 #define STAT(x) val[STAT_IDX(x)]
5256 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5259 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5260 A_TP_MIB_TCP_OUT_RST, sleep_ok);
5261 v4->tcp_out_rsts = STAT(OUT_RST);
5262 v4->tcp_in_segs = STAT64(IN_SEG);
5263 v4->tcp_out_segs = STAT64(OUT_SEG);
5264 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5267 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5268 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5269 v6->tcp_out_rsts = STAT(OUT_RST);
5270 v6->tcp_in_segs = STAT64(IN_SEG);
5271 v6->tcp_out_segs = STAT64(OUT_SEG);
5272 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5280 * t4_tp_get_err_stats - read TP's error MIB counters
5281 * @adap: the adapter
5282 * @st: holds the counter values
5283 * @sleep_ok: if true we may sleep while awaiting command completion
5285 * Returns the values of TP's error counters.
5287 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5290 int nchan = adap->chip_params->nchan;
5292 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5295 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5298 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5301 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5302 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5304 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5305 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5307 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5310 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5311 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5313 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5314 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
5316 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
5321 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5322 * @adap: the adapter
5323 * @st: holds the counter values
5325 * Returns the values of TP's proxy counters.
5327 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
5330 int nchan = adap->chip_params->nchan;
5332 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
5336 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5337 * @adap: the adapter
5338 * @st: holds the counter values
5339 * @sleep_ok: if true we may sleep while awaiting command completion
5341 * Returns the values of TP's CPL counters.
5343 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5346 int nchan = adap->chip_params->nchan;
5348 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
5350 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
5354 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5355 * @adap: the adapter
5356 * @st: holds the counter values
5358 * Returns the values of TP's RDMA counters.
5360 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5363 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
5368 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5369 * @adap: the adapter
5370 * @idx: the port index
5371 * @st: holds the counter values
5372 * @sleep_ok: if true we may sleep while awaiting command completion
5374 * Returns the values of TP's FCoE counters for the selected port.
5376 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5377 struct tp_fcoe_stats *st, bool sleep_ok)
5381 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
5384 t4_tp_mib_read(adap, &st->frames_drop, 1,
5385 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
5387 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
5390 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5394 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5395 * @adap: the adapter
5396 * @st: holds the counter values
5397 * @sleep_ok: if true we may sleep while awaiting command completion
5399 * Returns the values of TP's counters for non-TCP directly-placed packets.
5401 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5406 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
5408 st->frames = val[0];
5410 st->octets = ((u64)val[2] << 32) | val[3];
5414 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5415 * @adap: the adapter
5416 * @mtus: where to store the MTU values
5417 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5419 * Reads the HW path MTU table.
5421 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5426 for (i = 0; i < NMTUS; ++i) {
5427 t4_write_reg(adap, A_TP_MTU_TABLE,
5428 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5429 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5430 mtus[i] = G_MTUVALUE(v);
5432 mtu_log[i] = G_MTUWIDTH(v);
5437 * t4_read_cong_tbl - reads the congestion control table
5438 * @adap: the adapter
5439 * @incr: where to store the alpha values
5441 * Reads the additive increments programmed into the HW congestion
5444 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5446 unsigned int mtu, w;
5448 for (mtu = 0; mtu < NMTUS; ++mtu)
5449 for (w = 0; w < NCCTRL_WIN; ++w) {
5450 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5451 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5452 incr[mtu][w] = (u16)t4_read_reg(adap,
5453 A_TP_CCTRL_TABLE) & 0x1fff;
5458 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5459 * @adap: the adapter
5460 * @addr: the indirect TP register address
5461 * @mask: specifies the field within the register to modify
5462 * @val: new value for the field
5464 * Sets a field of an indirect TP register to the given value.
5466 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5467 unsigned int mask, unsigned int val)
5469 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5470 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5471 t4_write_reg(adap, A_TP_PIO_DATA, val);
5475 * init_cong_ctrl - initialize congestion control parameters
5476 * @a: the alpha values for congestion control
5477 * @b: the beta values for congestion control
5479 * Initialize the congestion control parameters.
5481 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5483 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5508 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5511 b[13] = b[14] = b[15] = b[16] = 3;
5512 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5513 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5518 /* The minimum additive increment value for the congestion control table */
5519 #define CC_MIN_INCR 2U
5522 * t4_load_mtus - write the MTU and congestion control HW tables
5523 * @adap: the adapter
5524 * @mtus: the values for the MTU table
5525 * @alpha: the values for the congestion control alpha parameter
5526 * @beta: the values for the congestion control beta parameter
5528 * Write the HW MTU table with the supplied MTUs and the high-speed
5529 * congestion control table with the supplied alpha, beta, and MTUs.
5530 * We write the two tables together because the additive increments
5531 * depend on the MTUs.
5533 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5534 const unsigned short *alpha, const unsigned short *beta)
5536 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5537 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5538 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5539 28672, 40960, 57344, 81920, 114688, 163840, 229376
5544 for (i = 0; i < NMTUS; ++i) {
5545 unsigned int mtu = mtus[i];
5546 unsigned int log2 = fls(mtu);
5548 if (!(mtu & ((1 << log2) >> 2))) /* round */
5550 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5551 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5553 for (w = 0; w < NCCTRL_WIN; ++w) {
5556 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5559 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5560 (w << 16) | (beta[w] << 13) | inc);
5566 * t4_set_pace_tbl - set the pace table
5567 * @adap: the adapter
5568 * @pace_vals: the pace values in microseconds
5569 * @start: index of the first entry in the HW pace table to set
5570 * @n: how many entries to set
5572 * Sets (a subset of the) HW pace table.
5574 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5575 unsigned int start, unsigned int n)
5577 unsigned int vals[NTX_SCHED], i;
5578 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5583 /* convert values from us to dack ticks, rounding to closest value */
5584 for (i = 0; i < n; i++, pace_vals++) {
5585 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5586 if (vals[i] > 0x7ff)
5588 if (*pace_vals && vals[i] == 0)
5591 for (i = 0; i < n; i++, start++)
5592 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5597 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5598 * @adap: the adapter
5599 * @kbps: target rate in Kbps
5600 * @sched: the scheduler index
5602 * Configure a Tx HW scheduler for the target rate.
5604 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5606 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5607 unsigned int clk = adap->params.vpd.cclk * 1000;
5608 unsigned int selected_cpt = 0, selected_bpt = 0;
5611 kbps *= 125; /* -> bytes */
5612 for (cpt = 1; cpt <= 255; cpt++) {
5614 bpt = (kbps + tps / 2) / tps;
5615 if (bpt > 0 && bpt <= 255) {
5617 delta = v >= kbps ? v - kbps : kbps - v;
5618 if (delta < mindelta) {
5623 } else if (selected_cpt)
5629 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5630 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5631 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5633 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5635 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5636 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5641 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5642 * @adap: the adapter
5643 * @sched: the scheduler index
5644 * @ipg: the interpacket delay in tenths of nanoseconds
5646 * Set the interpacket delay for a HW packet rate scheduler.
5648 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5650 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5652 /* convert ipg to nearest number of core clocks */
5653 ipg *= core_ticks_per_usec(adap);
5654 ipg = (ipg + 5000) / 10000;
5655 if (ipg > M_TXTIMERSEPQ0)
5658 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5659 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5661 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5663 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5664 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5665 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5670 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5671 * clocks. The formula is
5673 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5675 * which is equivalent to
5677 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5679 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5681 u64 v = bytes256 * adap->params.vpd.cclk;
5683 return v * 62 + v / 2;
5687 * t4_get_chan_txrate - get the current per channel Tx rates
5688 * @adap: the adapter
5689 * @nic_rate: rates for NIC traffic
5690 * @ofld_rate: rates for offloaded traffic
5692 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5695 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5699 v = t4_read_reg(adap, A_TP_TX_TRATE);
5700 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5701 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5702 if (adap->chip_params->nchan > 2) {
5703 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5704 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5707 v = t4_read_reg(adap, A_TP_TX_ORATE);
5708 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5709 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5710 if (adap->chip_params->nchan > 2) {
5711 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5712 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5717 * t4_set_trace_filter - configure one of the tracing filters
5718 * @adap: the adapter
5719 * @tp: the desired trace filter parameters
5720 * @idx: which filter to configure
5721 * @enable: whether to enable or disable the filter
5723 * Configures one of the tracing filters available in HW. If @tp is %NULL
5724 * it indicates that the filter is already written in the register and it
5725 * just needs to be enabled or disabled.
5727 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5728 int idx, int enable)
5730 int i, ofst = idx * 4;
5731 u32 data_reg, mask_reg, cfg;
5732 u32 multitrc = F_TRCMULTIFILTER;
5733 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5735 if (idx < 0 || idx >= NTRACE)
5738 if (tp == NULL || !enable) {
5739 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5745 * TODO - After T4 data book is updated, specify the exact
5748 * See T4 data book - MPS section for a complete description
5749 * of the below if..else handling of A_MPS_TRC_CFG register
5752 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5753 if (cfg & F_TRCMULTIFILTER) {
5755 * If multiple tracers are enabled, then maximum
5756 * capture size is 2.5KB (FIFO size of a single channel)
5757 * minus 2 flits for CPL_TRACE_PKT header.
5759 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5763 * If multiple tracers are disabled, to avoid deadlocks
5764 * maximum packet capture size of 9600 bytes is recommended.
5765 * Also in this mode, only trace0 can be enabled and running.
5768 if (tp->snap_len > 9600 || idx)
5772 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5773 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5774 tp->min_len > M_TFMINPKTSIZE)
5777 /* stop the tracer we'll be changing */
5778 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5780 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5781 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5782 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5784 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5785 t4_write_reg(adap, data_reg, tp->data[i]);
5786 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5788 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5789 V_TFCAPTUREMAX(tp->snap_len) |
5790 V_TFMINPKTSIZE(tp->min_len));
5791 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5792 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5794 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5795 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5801 * t4_get_trace_filter - query one of the tracing filters
5802 * @adap: the adapter
5803 * @tp: the current trace filter parameters
5804 * @idx: which trace filter to query
5805 * @enabled: non-zero if the filter is enabled
5807 * Returns the current settings of one of the HW tracing filters.
5809 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5813 int i, ofst = idx * 4;
5814 u32 data_reg, mask_reg;
5816 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5817 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5820 *enabled = !!(ctla & F_TFEN);
5821 tp->port = G_TFPORT(ctla);
5822 tp->invert = !!(ctla & F_TFINVERTMATCH);
5824 *enabled = !!(ctla & F_T5_TFEN);
5825 tp->port = G_T5_TFPORT(ctla);
5826 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5828 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5829 tp->min_len = G_TFMINPKTSIZE(ctlb);
5830 tp->skip_ofst = G_TFOFFSET(ctla);
5831 tp->skip_len = G_TFLENGTH(ctla);
5833 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5834 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5835 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5837 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5838 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5839 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5844 * t4_pmtx_get_stats - returns the HW stats from PMTX
5845 * @adap: the adapter
5846 * @cnt: where to store the count statistics
5847 * @cycles: where to store the cycle statistics
5849 * Returns performance statistics from PMTX.
5851 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5856 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5857 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5858 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5860 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5862 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5863 A_PM_TX_DBG_DATA, data, 2,
5864 A_PM_TX_DBG_STAT_MSB);
5865 cycles[i] = (((u64)data[0] << 32) | data[1]);
5871 * t4_pmrx_get_stats - returns the HW stats from PMRX
5872 * @adap: the adapter
5873 * @cnt: where to store the count statistics
5874 * @cycles: where to store the cycle statistics
5876 * Returns performance statistics from PMRX.
5878 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5883 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5884 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5885 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5887 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5889 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5890 A_PM_RX_DBG_DATA, data, 2,
5891 A_PM_RX_DBG_STAT_MSB);
5892 cycles[i] = (((u64)data[0] << 32) | data[1]);
5898 * t4_get_mps_bg_map - return the buffer groups associated with a port
5899 * @adap: the adapter
5900 * @idx: the port index
5902 * Returns a bitmap indicating which MPS buffer groups are associated
5903 * with the given port. Bit i is set if buffer group i is used by the
5906 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5910 if (adap->params.mps_bg_map)
5911 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
5913 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5915 return idx == 0 ? 0xf : 0;
5916 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5917 return idx < 2 ? (3 << (2 * idx)) : 0;
5922 * TP RX e-channels associated with the port.
5924 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
5926 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5929 return idx == 0 ? 0xf : 0;
5930 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5931 return idx < 2 ? (3 << (2 * idx)) : 0;
5936 * t4_get_port_type_description - return Port Type string description
5937 * @port_type: firmware Port Type enumeration
5939 const char *t4_get_port_type_description(enum fw_port_type port_type)
5941 static const char *const port_type_description[] = {
5966 if (port_type < ARRAY_SIZE(port_type_description))
5967 return port_type_description[port_type];
5972 * t4_get_port_stats_offset - collect port stats relative to a previous
5974 * @adap: The adapter
5976 * @stats: Current stats to fill
5977 * @offset: Previous stats snapshot
5979 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5980 struct port_stats *stats,
5981 struct port_stats *offset)
5986 t4_get_port_stats(adap, idx, stats);
5987 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5988 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5994 * t4_get_port_stats - collect port statistics
5995 * @adap: the adapter
5996 * @idx: the port index
5997 * @p: the stats structure to fill
5999 * Collect statistics related to the given port from HW.
6001 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6003 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6004 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6006 #define GET_STAT(name) \
6007 t4_read_reg64(adap, \
6008 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6009 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6010 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6012 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6013 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6014 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6015 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6016 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6017 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6018 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6019 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6020 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6021 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6022 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6023 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6024 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6025 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6026 p->tx_drop = GET_STAT(TX_PORT_DROP);
6027 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6028 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6029 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6030 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6031 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6032 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6033 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6034 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6036 if (chip_id(adap) >= CHELSIO_T5) {
6037 if (stat_ctl & F_COUNTPAUSESTATTX) {
6038 p->tx_frames -= p->tx_pause;
6039 p->tx_octets -= p->tx_pause * 64;
6041 if (stat_ctl & F_COUNTPAUSEMCTX)
6042 p->tx_mcast_frames -= p->tx_pause;
6045 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6046 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6047 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6048 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6049 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6050 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6051 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6052 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6053 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6054 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6055 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6056 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6057 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6058 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6059 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6060 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6061 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6062 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6063 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6064 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6065 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6066 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6067 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6068 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6069 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6070 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6071 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6073 if (chip_id(adap) >= CHELSIO_T5) {
6074 if (stat_ctl & F_COUNTPAUSESTATRX) {
6075 p->rx_frames -= p->rx_pause;
6076 p->rx_octets -= p->rx_pause * 64;
6078 if (stat_ctl & F_COUNTPAUSEMCRX)
6079 p->rx_mcast_frames -= p->rx_pause;
6082 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6083 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6084 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6085 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6086 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6087 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6088 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6089 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6096 * t4_get_lb_stats - collect loopback port statistics
6097 * @adap: the adapter
6098 * @idx: the loopback port index
6099 * @p: the stats structure to fill
6101 * Return HW statistics for the given loopback port.
6103 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6105 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6107 #define GET_STAT(name) \
6108 t4_read_reg64(adap, \
6110 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6111 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6112 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6114 p->octets = GET_STAT(BYTES);
6115 p->frames = GET_STAT(FRAMES);
6116 p->bcast_frames = GET_STAT(BCAST);
6117 p->mcast_frames = GET_STAT(MCAST);
6118 p->ucast_frames = GET_STAT(UCAST);
6119 p->error_frames = GET_STAT(ERROR);
6121 p->frames_64 = GET_STAT(64B);
6122 p->frames_65_127 = GET_STAT(65B_127B);
6123 p->frames_128_255 = GET_STAT(128B_255B);
6124 p->frames_256_511 = GET_STAT(256B_511B);
6125 p->frames_512_1023 = GET_STAT(512B_1023B);
6126 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6127 p->frames_1519_max = GET_STAT(1519B_MAX);
6128 p->drop = GET_STAT(DROP_FRAMES);
6130 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6131 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6132 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6133 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6134 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6135 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6136 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6137 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6144 * t4_wol_magic_enable - enable/disable magic packet WoL
6145 * @adap: the adapter
6146 * @port: the physical port index
6147 * @addr: MAC address expected in magic packets, %NULL to disable
6149 * Enables/disables magic packet wake-on-LAN for the selected port.
6151 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6154 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6157 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6158 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6159 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6161 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6162 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6163 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6167 t4_write_reg(adap, mag_id_reg_l,
6168 (addr[2] << 24) | (addr[3] << 16) |
6169 (addr[4] << 8) | addr[5]);
6170 t4_write_reg(adap, mag_id_reg_h,
6171 (addr[0] << 8) | addr[1]);
6173 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6174 V_MAGICEN(addr != NULL));
6178 * t4_wol_pat_enable - enable/disable pattern-based WoL
6179 * @adap: the adapter
6180 * @port: the physical port index
6181 * @map: bitmap of which HW pattern filters to set
6182 * @mask0: byte mask for bytes 0-63 of a packet
6183 * @mask1: byte mask for bytes 64-127 of a packet
6184 * @crc: Ethernet CRC for selected bytes
6185 * @enable: enable/disable switch
6187 * Sets the pattern filters indicated in @map to mask out the bytes
6188 * specified in @mask0/@mask1 in received packets and compare the CRC of
6189 * the resulting packet against @crc. If @enable is %true pattern-based
6190 * WoL is enabled, otherwise disabled.
6192 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6193 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6199 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6201 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6204 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6210 #define EPIO_REG(name) \
6211 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6212 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6214 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6215 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6216 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6218 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6222 /* write byte masks */
6223 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6224 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6225 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6226 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6230 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6231 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6232 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6233 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6238 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6242 /* t4_mk_filtdelwr - create a delete filter WR
6243 * @ftid: the filter ID
6244 * @wr: the filter work request to populate
6245 * @qid: ingress queue to receive the delete notification
6247 * Creates a filter work request to delete the supplied filter. If @qid is
6248 * negative the delete notification is suppressed.
6250 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6252 memset(wr, 0, sizeof(*wr));
6253 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6254 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6255 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6256 V_FW_FILTER_WR_NOREPLY(qid < 0));
6257 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6259 wr->rx_chan_rx_rpl_iq =
6260 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6263 #define INIT_CMD(var, cmd, rd_wr) do { \
6264 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
6265 F_FW_CMD_REQUEST | \
6266 F_FW_CMD_##rd_wr); \
6267 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6270 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6274 struct fw_ldst_cmd c;
6276 memset(&c, 0, sizeof(c));
6277 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6278 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6282 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6283 c.u.addrval.addr = cpu_to_be32(addr);
6284 c.u.addrval.val = cpu_to_be32(val);
6286 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6290 * t4_mdio_rd - read a PHY register through MDIO
6291 * @adap: the adapter
6292 * @mbox: mailbox to use for the FW command
6293 * @phy_addr: the PHY address
6294 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6295 * @reg: the register to read
6296 * @valp: where to store the value
6298 * Issues a FW command through the given mailbox to read a PHY register.
6300 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6301 unsigned int mmd, unsigned int reg, unsigned int *valp)
6305 struct fw_ldst_cmd c;
6307 memset(&c, 0, sizeof(c));
6308 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6309 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6310 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6312 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6313 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6314 V_FW_LDST_CMD_MMD(mmd));
6315 c.u.mdio.raddr = cpu_to_be16(reg);
6317 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6319 *valp = be16_to_cpu(c.u.mdio.rval);
6324 * t4_mdio_wr - write a PHY register through MDIO
6325 * @adap: the adapter
6326 * @mbox: mailbox to use for the FW command
6327 * @phy_addr: the PHY address
6328 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6329 * @reg: the register to write
6330 * @valp: value to write
6332 * Issues a FW command through the given mailbox to write a PHY register.
6334 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6335 unsigned int mmd, unsigned int reg, unsigned int val)
6338 struct fw_ldst_cmd c;
6340 memset(&c, 0, sizeof(c));
6341 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6342 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6343 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6345 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6346 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6347 V_FW_LDST_CMD_MMD(mmd));
6348 c.u.mdio.raddr = cpu_to_be16(reg);
6349 c.u.mdio.rval = cpu_to_be16(val);
6351 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6356 * t4_sge_decode_idma_state - decode the idma state
6357 * @adap: the adapter
6358 * @state: the state idma is stuck in
6360 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6362 static const char * const t4_decode[] = {
6364 "IDMA_PUSH_MORE_CPL_FIFO",
6365 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6367 "IDMA_PHYSADDR_SEND_PCIEHDR",
6368 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6369 "IDMA_PHYSADDR_SEND_PAYLOAD",
6370 "IDMA_SEND_FIFO_TO_IMSG",
6371 "IDMA_FL_REQ_DATA_FL_PREP",
6372 "IDMA_FL_REQ_DATA_FL",
6374 "IDMA_FL_H_REQ_HEADER_FL",
6375 "IDMA_FL_H_SEND_PCIEHDR",
6376 "IDMA_FL_H_PUSH_CPL_FIFO",
6377 "IDMA_FL_H_SEND_CPL",
6378 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6379 "IDMA_FL_H_SEND_IP_HDR",
6380 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6381 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6382 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6383 "IDMA_FL_D_SEND_PCIEHDR",
6384 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6385 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6386 "IDMA_FL_SEND_PCIEHDR",
6387 "IDMA_FL_PUSH_CPL_FIFO",
6389 "IDMA_FL_SEND_PAYLOAD_FIRST",
6390 "IDMA_FL_SEND_PAYLOAD",
6391 "IDMA_FL_REQ_NEXT_DATA_FL",
6392 "IDMA_FL_SEND_NEXT_PCIEHDR",
6393 "IDMA_FL_SEND_PADDING",
6394 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6395 "IDMA_FL_SEND_FIFO_TO_IMSG",
6396 "IDMA_FL_REQ_DATAFL_DONE",
6397 "IDMA_FL_REQ_HEADERFL_DONE",
6399 static const char * const t5_decode[] = {
6402 "IDMA_PUSH_MORE_CPL_FIFO",
6403 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6404 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6405 "IDMA_PHYSADDR_SEND_PCIEHDR",
6406 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6407 "IDMA_PHYSADDR_SEND_PAYLOAD",
6408 "IDMA_SEND_FIFO_TO_IMSG",
6409 "IDMA_FL_REQ_DATA_FL",
6411 "IDMA_FL_DROP_SEND_INC",
6412 "IDMA_FL_H_REQ_HEADER_FL",
6413 "IDMA_FL_H_SEND_PCIEHDR",
6414 "IDMA_FL_H_PUSH_CPL_FIFO",
6415 "IDMA_FL_H_SEND_CPL",
6416 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6417 "IDMA_FL_H_SEND_IP_HDR",
6418 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6419 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6420 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6421 "IDMA_FL_D_SEND_PCIEHDR",
6422 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6423 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6424 "IDMA_FL_SEND_PCIEHDR",
6425 "IDMA_FL_PUSH_CPL_FIFO",
6427 "IDMA_FL_SEND_PAYLOAD_FIRST",
6428 "IDMA_FL_SEND_PAYLOAD",
6429 "IDMA_FL_REQ_NEXT_DATA_FL",
6430 "IDMA_FL_SEND_NEXT_PCIEHDR",
6431 "IDMA_FL_SEND_PADDING",
6432 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6434 static const char * const t6_decode[] = {
6436 "IDMA_PUSH_MORE_CPL_FIFO",
6437 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6438 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6439 "IDMA_PHYSADDR_SEND_PCIEHDR",
6440 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6441 "IDMA_PHYSADDR_SEND_PAYLOAD",
6442 "IDMA_FL_REQ_DATA_FL",
6444 "IDMA_FL_DROP_SEND_INC",
6445 "IDMA_FL_H_REQ_HEADER_FL",
6446 "IDMA_FL_H_SEND_PCIEHDR",
6447 "IDMA_FL_H_PUSH_CPL_FIFO",
6448 "IDMA_FL_H_SEND_CPL",
6449 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6450 "IDMA_FL_H_SEND_IP_HDR",
6451 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6452 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6453 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6454 "IDMA_FL_D_SEND_PCIEHDR",
6455 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6456 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6457 "IDMA_FL_SEND_PCIEHDR",
6458 "IDMA_FL_PUSH_CPL_FIFO",
6460 "IDMA_FL_SEND_PAYLOAD_FIRST",
6461 "IDMA_FL_SEND_PAYLOAD",
6462 "IDMA_FL_REQ_NEXT_DATA_FL",
6463 "IDMA_FL_SEND_NEXT_PCIEHDR",
6464 "IDMA_FL_SEND_PADDING",
6465 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6467 static const u32 sge_regs[] = {
6468 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6469 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6470 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6472 const char * const *sge_idma_decode;
6473 int sge_idma_decode_nstates;
6475 unsigned int chip_version = chip_id(adapter);
6477 /* Select the right set of decode strings to dump depending on the
6478 * adapter chip type.
6480 switch (chip_version) {
6482 sge_idma_decode = (const char * const *)t4_decode;
6483 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6487 sge_idma_decode = (const char * const *)t5_decode;
6488 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6492 sge_idma_decode = (const char * const *)t6_decode;
6493 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6497 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6501 if (state < sge_idma_decode_nstates)
6502 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6504 CH_WARN(adapter, "idma state %d unknown\n", state);
6506 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6507 CH_WARN(adapter, "SGE register %#x value %#x\n",
6508 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6512 * t4_sge_ctxt_flush - flush the SGE context cache
6513 * @adap: the adapter
6514 * @mbox: mailbox to use for the FW command
6516 * Issues a FW command through the given mailbox to flush the
6517 * SGE context cache.
6519 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6523 struct fw_ldst_cmd c;
6525 memset(&c, 0, sizeof(c));
6526 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6527 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6528 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6530 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6531 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6533 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6538 * t4_fw_hello - establish communication with FW
6539 * @adap: the adapter
6540 * @mbox: mailbox to use for the FW command
6541 * @evt_mbox: mailbox to receive async FW events
6542 * @master: specifies the caller's willingness to be the device master
6543 * @state: returns the current device state (if non-NULL)
6545 * Issues a command to establish communication with FW. Returns either
6546 * an error (negative integer) or the mailbox of the Master PF.
6548 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6549 enum dev_master master, enum dev_state *state)
6552 struct fw_hello_cmd c;
6554 unsigned int master_mbox;
6555 int retries = FW_CMD_HELLO_RETRIES;
6558 memset(&c, 0, sizeof(c));
6559 INIT_CMD(c, HELLO, WRITE);
6560 c.err_to_clearinit = cpu_to_be32(
6561 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6562 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6563 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6564 mbox : M_FW_HELLO_CMD_MBMASTER) |
6565 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6566 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6567 F_FW_HELLO_CMD_CLEARINIT);
6570 * Issue the HELLO command to the firmware. If it's not successful
6571 * but indicates that we got a "busy" or "timeout" condition, retry
6572 * the HELLO until we exhaust our retry limit. If we do exceed our
6573 * retry limit, check to see if the firmware left us any error
6574 * information and report that if so ...
6576 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6577 if (ret != FW_SUCCESS) {
6578 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6580 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6581 t4_report_fw_error(adap);
6585 v = be32_to_cpu(c.err_to_clearinit);
6586 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6588 if (v & F_FW_HELLO_CMD_ERR)
6589 *state = DEV_STATE_ERR;
6590 else if (v & F_FW_HELLO_CMD_INIT)
6591 *state = DEV_STATE_INIT;
6593 *state = DEV_STATE_UNINIT;
6597 * If we're not the Master PF then we need to wait around for the
6598 * Master PF Driver to finish setting up the adapter.
6600 * Note that we also do this wait if we're a non-Master-capable PF and
6601 * there is no current Master PF; a Master PF may show up momentarily
6602 * and we wouldn't want to fail pointlessly. (This can happen when an
6603 * OS loads lots of different drivers rapidly at the same time). In
6604 * this case, the Master PF returned by the firmware will be
6605 * M_PCIE_FW_MASTER so the test below will work ...
6607 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6608 master_mbox != mbox) {
6609 int waiting = FW_CMD_HELLO_TIMEOUT;
6612 * Wait for the firmware to either indicate an error or
6613 * initialized state. If we see either of these we bail out
6614 * and report the issue to the caller. If we exhaust the
6615 * "hello timeout" and we haven't exhausted our retries, try
6616 * again. Otherwise bail with a timeout error.
6625 * If neither Error nor Initialialized are indicated
6626 * by the firmware keep waiting till we exhaust our
6627 * timeout ... and then retry if we haven't exhausted
6630 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6631 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6642 * We either have an Error or Initialized condition
6643 * report errors preferentially.
6646 if (pcie_fw & F_PCIE_FW_ERR)
6647 *state = DEV_STATE_ERR;
6648 else if (pcie_fw & F_PCIE_FW_INIT)
6649 *state = DEV_STATE_INIT;
6653 * If we arrived before a Master PF was selected and
6654 * there's not a valid Master PF, grab its identity
6657 if (master_mbox == M_PCIE_FW_MASTER &&
6658 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6659 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6668 * t4_fw_bye - end communication with FW
6669 * @adap: the adapter
6670 * @mbox: mailbox to use for the FW command
6672 * Issues a command to terminate communication with FW.
6674 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6676 struct fw_bye_cmd c;
6678 memset(&c, 0, sizeof(c));
6679 INIT_CMD(c, BYE, WRITE);
6680 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6684 * t4_fw_reset - issue a reset to FW
6685 * @adap: the adapter
6686 * @mbox: mailbox to use for the FW command
6687 * @reset: specifies the type of reset to perform
6689 * Issues a reset command of the specified type to FW.
6691 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6693 struct fw_reset_cmd c;
6695 memset(&c, 0, sizeof(c));
6696 INIT_CMD(c, RESET, WRITE);
6697 c.val = cpu_to_be32(reset);
6698 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6702 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6703 * @adap: the adapter
6704 * @mbox: mailbox to use for the FW RESET command (if desired)
6705 * @force: force uP into RESET even if FW RESET command fails
6707 * Issues a RESET command to firmware (if desired) with a HALT indication
6708 * and then puts the microprocessor into RESET state. The RESET command
6709 * will only be issued if a legitimate mailbox is provided (mbox <=
6710 * M_PCIE_FW_MASTER).
6712 * This is generally used in order for the host to safely manipulate the
6713 * adapter without fear of conflicting with whatever the firmware might
6714 * be doing. The only way out of this state is to RESTART the firmware
6717 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6722 * If a legitimate mailbox is provided, issue a RESET command
6723 * with a HALT indication.
6725 if (mbox <= M_PCIE_FW_MASTER) {
6726 struct fw_reset_cmd c;
6728 memset(&c, 0, sizeof(c));
6729 INIT_CMD(c, RESET, WRITE);
6730 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6731 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6732 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6736 * Normally we won't complete the operation if the firmware RESET
6737 * command fails but if our caller insists we'll go ahead and put the
6738 * uP into RESET. This can be useful if the firmware is hung or even
6739 * missing ... We'll have to take the risk of putting the uP into
6740 * RESET without the cooperation of firmware in that case.
6742 * We also force the firmware's HALT flag to be on in case we bypassed
6743 * the firmware RESET command above or we're dealing with old firmware
6744 * which doesn't have the HALT capability. This will serve as a flag
6745 * for the incoming firmware to know that it's coming out of a HALT
6746 * rather than a RESET ... if it's new enough to understand that ...
6748 if (ret == 0 || force) {
6749 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6750 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6755 * And we always return the result of the firmware RESET command
6756 * even when we force the uP into RESET ...
6762 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6763 * @adap: the adapter
6764 * @reset: if we want to do a RESET to restart things
6766 * Restart firmware previously halted by t4_fw_halt(). On successful
6767 * return the previous PF Master remains as the new PF Master and there
6768 * is no need to issue a new HELLO command, etc.
6770 * We do this in two ways:
6772 * 1. If we're dealing with newer firmware we'll simply want to take
6773 * the chip's microprocessor out of RESET. This will cause the
6774 * firmware to start up from its start vector. And then we'll loop
6775 * until the firmware indicates it's started again (PCIE_FW.HALT
6776 * reset to 0) or we timeout.
6778 * 2. If we're dealing with older firmware then we'll need to RESET
6779 * the chip since older firmware won't recognize the PCIE_FW.HALT
6780 * flag and automatically RESET itself on startup.
6782 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6786 * Since we're directing the RESET instead of the firmware
6787 * doing it automatically, we need to clear the PCIE_FW.HALT
6790 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6793 * If we've been given a valid mailbox, first try to get the
6794 * firmware to do the RESET. If that works, great and we can
6795 * return success. Otherwise, if we haven't been given a
6796 * valid mailbox or the RESET command failed, fall back to
6797 * hitting the chip with a hammer.
6799 if (mbox <= M_PCIE_FW_MASTER) {
6800 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6802 if (t4_fw_reset(adap, mbox,
6803 F_PIORST | F_PIORSTMODE) == 0)
6807 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6812 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6813 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6814 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6825 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6826 * @adap: the adapter
6827 * @mbox: mailbox to use for the FW RESET command (if desired)
6828 * @fw_data: the firmware image to write
6830 * @force: force upgrade even if firmware doesn't cooperate
6832 * Perform all of the steps necessary for upgrading an adapter's
6833 * firmware image. Normally this requires the cooperation of the
6834 * existing firmware in order to halt all existing activities
6835 * but if an invalid mailbox token is passed in we skip that step
6836 * (though we'll still put the adapter microprocessor into RESET in
6839 * On successful return the new firmware will have been loaded and
6840 * the adapter will have been fully RESET losing all previous setup
6841 * state. On unsuccessful return the adapter may be completely hosed ...
6842 * positive errno indicates that the adapter is ~probably~ intact, a
6843 * negative errno indicates that things are looking bad ...
6845 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6846 const u8 *fw_data, unsigned int size, int force)
6848 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6849 unsigned int bootstrap =
6850 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6853 if (!t4_fw_matches_chip(adap, fw_hdr))
6857 ret = t4_fw_halt(adap, mbox, force);
6858 if (ret < 0 && !force)
6862 ret = t4_load_fw(adap, fw_data, size);
6863 if (ret < 0 || bootstrap)
6867 * Older versions of the firmware don't understand the new
6868 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6869 * restart. So for newly loaded older firmware we'll have to do the
6870 * RESET for it so it starts up on a clean slate. We can tell if
6871 * the newly loaded firmware will handle this right by checking
6872 * its header flags to see if it advertises the capability.
6874 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6875 return t4_fw_restart(adap, mbox, reset);
6879 * Card doesn't have a firmware, install one.
6881 int t4_fw_forceinstall(struct adapter *adap, const u8 *fw_data,
6884 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6885 unsigned int bootstrap =
6886 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6889 if (!t4_fw_matches_chip(adap, fw_hdr) || bootstrap)
6892 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6893 t4_write_reg(adap, A_PCIE_FW, 0); /* Clobber internal state */
6894 ret = t4_load_fw(adap, fw_data, size);
6897 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6904 * t4_fw_initialize - ask FW to initialize the device
6905 * @adap: the adapter
6906 * @mbox: mailbox to use for the FW command
6908 * Issues a command to FW to partially initialize the device. This
6909 * performs initialization that generally doesn't depend on user input.
6911 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6913 struct fw_initialize_cmd c;
6915 memset(&c, 0, sizeof(c));
6916 INIT_CMD(c, INITIALIZE, WRITE);
6917 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6921 * t4_query_params_rw - query FW or device parameters
6922 * @adap: the adapter
6923 * @mbox: mailbox to use for the FW command
6926 * @nparams: the number of parameters
6927 * @params: the parameter names
6928 * @val: the parameter values
6929 * @rw: Write and read flag
6931 * Reads the value of FW or device parameters. Up to 7 parameters can be
6934 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6935 unsigned int vf, unsigned int nparams, const u32 *params,
6939 struct fw_params_cmd c;
6940 __be32 *p = &c.param[0].mnem;
6945 memset(&c, 0, sizeof(c));
6946 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6947 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6948 V_FW_PARAMS_CMD_PFN(pf) |
6949 V_FW_PARAMS_CMD_VFN(vf));
6950 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6952 for (i = 0; i < nparams; i++) {
6953 *p++ = cpu_to_be32(*params++);
6955 *p = cpu_to_be32(*(val + i));
6959 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6961 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6962 *val++ = be32_to_cpu(*p);
6966 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6967 unsigned int vf, unsigned int nparams, const u32 *params,
6970 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6974 * t4_set_params_timeout - sets FW or device parameters
6975 * @adap: the adapter
6976 * @mbox: mailbox to use for the FW command
6979 * @nparams: the number of parameters
6980 * @params: the parameter names
6981 * @val: the parameter values
6982 * @timeout: the timeout time
6984 * Sets the value of FW or device parameters. Up to 7 parameters can be
6985 * specified at once.
6987 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6988 unsigned int pf, unsigned int vf,
6989 unsigned int nparams, const u32 *params,
6990 const u32 *val, int timeout)
6992 struct fw_params_cmd c;
6993 __be32 *p = &c.param[0].mnem;
6998 memset(&c, 0, sizeof(c));
6999 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7000 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7001 V_FW_PARAMS_CMD_PFN(pf) |
7002 V_FW_PARAMS_CMD_VFN(vf));
7003 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7006 *p++ = cpu_to_be32(*params++);
7007 *p++ = cpu_to_be32(*val++);
7010 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7014 * t4_set_params - sets FW or device parameters
7015 * @adap: the adapter
7016 * @mbox: mailbox to use for the FW command
7019 * @nparams: the number of parameters
7020 * @params: the parameter names
7021 * @val: the parameter values
7023 * Sets the value of FW or device parameters. Up to 7 parameters can be
7024 * specified at once.
7026 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7027 unsigned int vf, unsigned int nparams, const u32 *params,
7030 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7031 FW_CMD_MAX_TIMEOUT);
7035 * t4_cfg_pfvf - configure PF/VF resource limits
7036 * @adap: the adapter
7037 * @mbox: mailbox to use for the FW command
7038 * @pf: the PF being configured
7039 * @vf: the VF being configured
7040 * @txq: the max number of egress queues
7041 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7042 * @rxqi: the max number of interrupt-capable ingress queues
7043 * @rxq: the max number of interruptless ingress queues
7044 * @tc: the PCI traffic class
7045 * @vi: the max number of virtual interfaces
7046 * @cmask: the channel access rights mask for the PF/VF
7047 * @pmask: the port access rights mask for the PF/VF
7048 * @nexact: the maximum number of exact MPS filters
7049 * @rcaps: read capabilities
7050 * @wxcaps: write/execute capabilities
7052 * Configures resource limits and capabilities for a physical or virtual
7055 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7056 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7057 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7058 unsigned int vi, unsigned int cmask, unsigned int pmask,
7059 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7061 struct fw_pfvf_cmd c;
7063 memset(&c, 0, sizeof(c));
7064 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7065 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7066 V_FW_PFVF_CMD_VFN(vf));
7067 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7068 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7069 V_FW_PFVF_CMD_NIQ(rxq));
7070 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7071 V_FW_PFVF_CMD_PMASK(pmask) |
7072 V_FW_PFVF_CMD_NEQ(txq));
7073 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7074 V_FW_PFVF_CMD_NVI(vi) |
7075 V_FW_PFVF_CMD_NEXACTF(nexact));
7076 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7077 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7078 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7079 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7083 * t4_alloc_vi_func - allocate a virtual interface
7084 * @adap: the adapter
7085 * @mbox: mailbox to use for the FW command
7086 * @port: physical port associated with the VI
7087 * @pf: the PF owning the VI
7088 * @vf: the VF owning the VI
7089 * @nmac: number of MAC addresses needed (1 to 5)
7090 * @mac: the MAC addresses of the VI
7091 * @rss_size: size of RSS table slice associated with this VI
7092 * @portfunc: which Port Application Function MAC Address is desired
7093 * @idstype: Intrusion Detection Type
7095 * Allocates a virtual interface for the given physical port. If @mac is
7096 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7097 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7098 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7099 * stored consecutively so the space needed is @nmac * 6 bytes.
7100 * Returns a negative error number or the non-negative VI id.
7102 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7103 unsigned int port, unsigned int pf, unsigned int vf,
7104 unsigned int nmac, u8 *mac, u16 *rss_size,
7105 unsigned int portfunc, unsigned int idstype)
7110 memset(&c, 0, sizeof(c));
7111 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7112 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7113 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7114 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7115 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7116 V_FW_VI_CMD_FUNC(portfunc));
7117 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7120 c.norss_rsssize = F_FW_VI_CMD_NORSS;
7122 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7127 memcpy(mac, c.mac, sizeof(c.mac));
7130 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7132 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7134 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7136 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7140 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7141 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7145 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7146 * @adap: the adapter
7147 * @mbox: mailbox to use for the FW command
7148 * @port: physical port associated with the VI
7149 * @pf: the PF owning the VI
7150 * @vf: the VF owning the VI
7151 * @nmac: number of MAC addresses needed (1 to 5)
7152 * @mac: the MAC addresses of the VI
7153 * @rss_size: size of RSS table slice associated with this VI
7155 * backwards compatible and convieniance routine to allocate a Virtual
7156 * Interface with a Ethernet Port Application Function and Intrustion
7157 * Detection System disabled.
7159 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7160 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7163 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7168 * t4_free_vi - free a virtual interface
7169 * @adap: the adapter
7170 * @mbox: mailbox to use for the FW command
7171 * @pf: the PF owning the VI
7172 * @vf: the VF owning the VI
7173 * @viid: virtual interface identifiler
7175 * Free a previously allocated virtual interface.
7177 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7178 unsigned int vf, unsigned int viid)
7182 memset(&c, 0, sizeof(c));
7183 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7186 V_FW_VI_CMD_PFN(pf) |
7187 V_FW_VI_CMD_VFN(vf));
7188 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7189 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7191 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7195 * t4_set_rxmode - set Rx properties of a virtual interface
7196 * @adap: the adapter
7197 * @mbox: mailbox to use for the FW command
7199 * @mtu: the new MTU or -1
7200 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7201 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7202 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7203 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7204 * @sleep_ok: if true we may sleep while awaiting command completion
7206 * Sets Rx properties of a virtual interface.
7208 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7209 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7212 struct fw_vi_rxmode_cmd c;
7214 /* convert to FW values */
7216 mtu = M_FW_VI_RXMODE_CMD_MTU;
7218 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7220 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7222 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7224 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7226 memset(&c, 0, sizeof(c));
7227 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7228 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7229 V_FW_VI_RXMODE_CMD_VIID(viid));
7230 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7232 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7233 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7234 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7235 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7236 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7237 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7241 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7242 * @adap: the adapter
7243 * @mbox: mailbox to use for the FW command
7245 * @free: if true any existing filters for this VI id are first removed
7246 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7247 * @addr: the MAC address(es)
7248 * @idx: where to store the index of each allocated filter
7249 * @hash: pointer to hash address filter bitmap
7250 * @sleep_ok: call is allowed to sleep
7252 * Allocates an exact-match filter for each of the supplied addresses and
7253 * sets it to the corresponding address. If @idx is not %NULL it should
7254 * have at least @naddr entries, each of which will be set to the index of
7255 * the filter allocated for the corresponding MAC address. If a filter
7256 * could not be allocated for an address its index is set to 0xffff.
7257 * If @hash is not %NULL addresses that fail to allocate an exact filter
7258 * are hashed and update the hash filter bitmap pointed at by @hash.
7260 * Returns a negative error number or the number of filters allocated.
7262 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7263 unsigned int viid, bool free, unsigned int naddr,
7264 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7266 int offset, ret = 0;
7267 struct fw_vi_mac_cmd c;
7268 unsigned int nfilters = 0;
7269 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7270 unsigned int rem = naddr;
7272 if (naddr > max_naddr)
7275 for (offset = 0; offset < naddr ; /**/) {
7276 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7278 : ARRAY_SIZE(c.u.exact));
7279 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7280 u.exact[fw_naddr]), 16);
7281 struct fw_vi_mac_exact *p;
7284 memset(&c, 0, sizeof(c));
7285 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7288 V_FW_CMD_EXEC(free) |
7289 V_FW_VI_MAC_CMD_VIID(viid));
7290 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7291 V_FW_CMD_LEN16(len16));
7293 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7295 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7296 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7297 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7301 * It's okay if we run out of space in our MAC address arena.
7302 * Some of the addresses we submit may get stored so we need
7303 * to run through the reply to see what the results were ...
7305 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7306 if (ret && ret != -FW_ENOMEM)
7309 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7310 u16 index = G_FW_VI_MAC_CMD_IDX(
7311 be16_to_cpu(p->valid_to_idx));
7314 idx[offset+i] = (index >= max_naddr
7317 if (index < max_naddr)
7320 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
7328 if (ret == 0 || ret == -FW_ENOMEM)
7334 * t4_change_mac - modifies the exact-match filter for a MAC address
7335 * @adap: the adapter
7336 * @mbox: mailbox to use for the FW command
7338 * @idx: index of existing filter for old value of MAC address, or -1
7339 * @addr: the new MAC address value
7340 * @persist: whether a new MAC allocation should be persistent
7341 * @add_smt: if true also add the address to the HW SMT
7343 * Modifies an exact-match filter and sets it to the new MAC address if
7344 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7345 * latter case the address is added persistently if @persist is %true.
7347 * Note that in general it is not possible to modify the value of a given
7348 * filter so the generic way to modify an address filter is to free the one
7349 * being used by the old address value and allocate a new filter for the
7350 * new address value.
7352 * Returns a negative error number or the index of the filter with the new
7353 * MAC value. Note that this index may differ from @idx.
7355 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7356 int idx, const u8 *addr, bool persist, bool add_smt)
7359 struct fw_vi_mac_cmd c;
7360 struct fw_vi_mac_exact *p = c.u.exact;
7361 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7363 if (idx < 0) /* new allocation */
7364 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7365 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7367 memset(&c, 0, sizeof(c));
7368 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7369 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7370 V_FW_VI_MAC_CMD_VIID(viid));
7371 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7372 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7373 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7374 V_FW_VI_MAC_CMD_IDX(idx));
7375 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7377 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7379 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7380 if (ret >= max_mac_addr)
7387 * t4_set_addr_hash - program the MAC inexact-match hash filter
7388 * @adap: the adapter
7389 * @mbox: mailbox to use for the FW command
7391 * @ucast: whether the hash filter should also match unicast addresses
7392 * @vec: the value to be written to the hash filter
7393 * @sleep_ok: call is allowed to sleep
7395 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7397 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7398 bool ucast, u64 vec, bool sleep_ok)
7400 struct fw_vi_mac_cmd c;
7403 memset(&c, 0, sizeof(c));
7404 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7405 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7406 V_FW_VI_ENABLE_CMD_VIID(viid));
7407 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7408 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7409 c.freemacs_to_len16 = cpu_to_be32(val);
7410 c.u.hash.hashvec = cpu_to_be64(vec);
7411 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7415 * t4_enable_vi_params - enable/disable a virtual interface
7416 * @adap: the adapter
7417 * @mbox: mailbox to use for the FW command
7419 * @rx_en: 1=enable Rx, 0=disable Rx
7420 * @tx_en: 1=enable Tx, 0=disable Tx
7421 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7423 * Enables/disables a virtual interface. Note that setting DCB Enable
7424 * only makes sense when enabling a Virtual Interface ...
7426 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7427 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7429 struct fw_vi_enable_cmd c;
7431 memset(&c, 0, sizeof(c));
7432 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7433 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7434 V_FW_VI_ENABLE_CMD_VIID(viid));
7435 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7436 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7437 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7439 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7443 * t4_enable_vi - enable/disable a virtual interface
7444 * @adap: the adapter
7445 * @mbox: mailbox to use for the FW command
7447 * @rx_en: 1=enable Rx, 0=disable Rx
7448 * @tx_en: 1=enable Tx, 0=disable Tx
7450 * Enables/disables a virtual interface. Note that setting DCB Enable
7451 * only makes sense when enabling a Virtual Interface ...
7453 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7454 bool rx_en, bool tx_en)
7456 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7460 * t4_identify_port - identify a VI's port by blinking its LED
7461 * @adap: the adapter
7462 * @mbox: mailbox to use for the FW command
7464 * @nblinks: how many times to blink LED at 2.5 Hz
7466 * Identifies a VI's port by blinking its LED.
7468 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7469 unsigned int nblinks)
7471 struct fw_vi_enable_cmd c;
7473 memset(&c, 0, sizeof(c));
7474 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7475 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7476 V_FW_VI_ENABLE_CMD_VIID(viid));
7477 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7478 c.blinkdur = cpu_to_be16(nblinks);
7479 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7483 * t4_iq_stop - stop an ingress queue and its FLs
7484 * @adap: the adapter
7485 * @mbox: mailbox to use for the FW command
7486 * @pf: the PF owning the queues
7487 * @vf: the VF owning the queues
7488 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7489 * @iqid: ingress queue id
7490 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7491 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7493 * Stops an ingress queue and its associated FLs, if any. This causes
7494 * any current or future data/messages destined for these queues to be
7497 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7498 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7499 unsigned int fl0id, unsigned int fl1id)
7503 memset(&c, 0, sizeof(c));
7504 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7505 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7506 V_FW_IQ_CMD_VFN(vf));
7507 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7508 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7509 c.iqid = cpu_to_be16(iqid);
7510 c.fl0id = cpu_to_be16(fl0id);
7511 c.fl1id = cpu_to_be16(fl1id);
7512 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7516 * t4_iq_free - free an ingress queue and its FLs
7517 * @adap: the adapter
7518 * @mbox: mailbox to use for the FW command
7519 * @pf: the PF owning the queues
7520 * @vf: the VF owning the queues
7521 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7522 * @iqid: ingress queue id
7523 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7524 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7526 * Frees an ingress queue and its associated FLs, if any.
7528 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7529 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7530 unsigned int fl0id, unsigned int fl1id)
7534 memset(&c, 0, sizeof(c));
7535 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7536 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7537 V_FW_IQ_CMD_VFN(vf));
7538 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7539 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7540 c.iqid = cpu_to_be16(iqid);
7541 c.fl0id = cpu_to_be16(fl0id);
7542 c.fl1id = cpu_to_be16(fl1id);
7543 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7547 * t4_eth_eq_free - free an Ethernet egress queue
7548 * @adap: the adapter
7549 * @mbox: mailbox to use for the FW command
7550 * @pf: the PF owning the queue
7551 * @vf: the VF owning the queue
7552 * @eqid: egress queue id
7554 * Frees an Ethernet egress queue.
7556 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7557 unsigned int vf, unsigned int eqid)
7559 struct fw_eq_eth_cmd c;
7561 memset(&c, 0, sizeof(c));
7562 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7563 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7564 V_FW_EQ_ETH_CMD_PFN(pf) |
7565 V_FW_EQ_ETH_CMD_VFN(vf));
7566 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7567 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7568 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7572 * t4_ctrl_eq_free - free a control egress queue
7573 * @adap: the adapter
7574 * @mbox: mailbox to use for the FW command
7575 * @pf: the PF owning the queue
7576 * @vf: the VF owning the queue
7577 * @eqid: egress queue id
7579 * Frees a control egress queue.
7581 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7582 unsigned int vf, unsigned int eqid)
7584 struct fw_eq_ctrl_cmd c;
7586 memset(&c, 0, sizeof(c));
7587 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7588 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7589 V_FW_EQ_CTRL_CMD_PFN(pf) |
7590 V_FW_EQ_CTRL_CMD_VFN(vf));
7591 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7592 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7593 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7597 * t4_ofld_eq_free - free an offload egress queue
7598 * @adap: the adapter
7599 * @mbox: mailbox to use for the FW command
7600 * @pf: the PF owning the queue
7601 * @vf: the VF owning the queue
7602 * @eqid: egress queue id
7604 * Frees a control egress queue.
7606 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7607 unsigned int vf, unsigned int eqid)
7609 struct fw_eq_ofld_cmd c;
7611 memset(&c, 0, sizeof(c));
7612 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7613 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7614 V_FW_EQ_OFLD_CMD_PFN(pf) |
7615 V_FW_EQ_OFLD_CMD_VFN(vf));
7616 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7617 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7618 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7622 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7623 * @link_down_rc: Link Down Reason Code
7625 * Returns a string representation of the Link Down Reason Code.
7627 const char *t4_link_down_rc_str(unsigned char link_down_rc)
7629 static const char *reason[] = {
7632 "Auto-negotiation Failure",
7634 "Insufficient Airflow",
7635 "Unable To Determine Reason",
7636 "No RX Signal Detected",
7640 if (link_down_rc >= ARRAY_SIZE(reason))
7641 return "Bad Reason Code";
7643 return reason[link_down_rc];
7647 * Updates all fields owned by the common code in port_info and link_config
7648 * based on information provided by the firmware. Does not touch any
7649 * requested_* field.
7651 static void handle_port_info(struct port_info *pi, const struct fw_port_info *p)
7653 struct link_config *lc = &pi->link_cfg;
7655 unsigned char fc, fec;
7656 u32 stat = be32_to_cpu(p->lstatus_to_modtype);
7658 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
7659 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
7660 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
7661 G_FW_PORT_CMD_MDIOADDR(stat) : -1;
7663 lc->supported = be16_to_cpu(p->pcap);
7664 lc->advertising = be16_to_cpu(p->acap);
7665 lc->lp_advertising = be16_to_cpu(p->lpacap);
7666 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7667 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
7670 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7672 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7674 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7676 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7678 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7680 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
7685 if (stat & F_FW_PORT_CMD_RXPAUSE)
7687 if (stat & F_FW_PORT_CMD_TXPAUSE)
7692 if (lc->advertising & FW_PORT_CAP_FEC_RS)
7694 if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
7695 fec |= FEC_BASER_RS;
7696 if (lc->advertising & FW_PORT_CAP_FEC_RESERVED)
7697 fec |= FEC_RESERVED;
7702 * t4_update_port_info - retrieve and update port information if changed
7703 * @pi: the port_info
7705 * We issue a Get Port Information Command to the Firmware and, if
7706 * successful, we check to see if anything is different from what we
7707 * last recorded and update things accordingly.
7709 int t4_update_port_info(struct port_info *pi)
7711 struct fw_port_cmd port_cmd;
7714 memset(&port_cmd, 0, sizeof port_cmd);
7715 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
7716 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7717 V_FW_PORT_CMD_PORTID(pi->tx_chan));
7718 port_cmd.action_to_len16 = cpu_to_be32(
7719 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7720 FW_LEN16(port_cmd));
7721 ret = t4_wr_mbox_ns(pi->adapter, pi->adapter->mbox,
7722 &port_cmd, sizeof(port_cmd), &port_cmd);
7726 handle_port_info(pi, &port_cmd.u.info);
7731 * t4_handle_fw_rpl - process a FW reply message
7732 * @adap: the adapter
7733 * @rpl: start of the FW message
7735 * Processes a FW message, such as link state change messages.
7737 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7739 u8 opcode = *(const u8 *)rpl;
7740 const struct fw_port_cmd *p = (const void *)rpl;
7741 unsigned int action =
7742 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7744 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7745 /* link/module state change message */
7746 int i, old_ptype, old_mtype;
7747 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7748 struct port_info *pi = NULL;
7749 struct link_config *lc, *old_lc;
7751 for_each_port(adap, i) {
7752 pi = adap2pinfo(adap, i);
7753 if (pi->tx_chan == chan)
7758 old_lc = &pi->old_link_cfg;
7759 old_ptype = pi->port_type;
7760 old_mtype = pi->mod_type;
7762 handle_port_info(pi, &p->u.info);
7763 if (old_ptype != pi->port_type || old_mtype != pi->mod_type) {
7764 t4_os_portmod_changed(pi);
7766 if (old_lc->link_ok != lc->link_ok ||
7767 old_lc->speed != lc->speed ||
7768 old_lc->fec != lc->fec ||
7769 old_lc->fc != lc->fc) {
7770 t4_os_link_changed(pi);
7774 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7781 * get_pci_mode - determine a card's PCI mode
7782 * @adapter: the adapter
7783 * @p: where to store the PCI settings
7785 * Determines a card's PCI mode and associated parameters, such as speed
7788 static void get_pci_mode(struct adapter *adapter,
7789 struct pci_params *p)
7794 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7796 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7797 p->speed = val & PCI_EXP_LNKSTA_CLS;
7798 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7803 u32 vendor_and_model_id;
7807 int t4_get_flash_params(struct adapter *adapter)
7810 * Table for non-standard supported Flash parts. Note, all Flash
7811 * parts must have 64KB sectors.
7813 static struct flash_desc supported_flash[] = {
7814 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7819 unsigned int part, manufacturer;
7820 unsigned int density, size;
7824 * Issue a Read ID Command to the Flash part. We decode supported
7825 * Flash parts and their sizes from this. There's a newer Query
7826 * Command which can retrieve detailed geometry information but many
7827 * Flash parts don't support it.
7829 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7831 ret = sf1_read(adapter, 3, 0, 1, &flashid);
7832 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7837 * Check to see if it's one of our non-standard supported Flash parts.
7839 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
7840 if (supported_flash[part].vendor_and_model_id == flashid) {
7841 adapter->params.sf_size =
7842 supported_flash[part].size_mb;
7843 adapter->params.sf_nsec =
7844 adapter->params.sf_size / SF_SEC_SIZE;
7849 * Decode Flash part size. The code below looks repetative with
7850 * common encodings, but that's not guaranteed in the JEDEC
7851 * specification for the Read JADEC ID command. The only thing that
7852 * we're guaranteed by the JADEC specification is where the
7853 * Manufacturer ID is in the returned result. After that each
7854 * Manufacturer ~could~ encode things completely differently.
7855 * Note, all Flash parts must have 64KB sectors.
7857 manufacturer = flashid & 0xff;
7858 switch (manufacturer) {
7859 case 0x20: { /* Micron/Numonix */
7861 * This Density -> Size decoding table is taken from Micron
7864 density = (flashid >> 16) & 0xff;
7866 case 0x14: size = 1 << 20; break; /* 1MB */
7867 case 0x15: size = 1 << 21; break; /* 2MB */
7868 case 0x16: size = 1 << 22; break; /* 4MB */
7869 case 0x17: size = 1 << 23; break; /* 8MB */
7870 case 0x18: size = 1 << 24; break; /* 16MB */
7871 case 0x19: size = 1 << 25; break; /* 32MB */
7872 case 0x20: size = 1 << 26; break; /* 64MB */
7873 case 0x21: size = 1 << 27; break; /* 128MB */
7874 case 0x22: size = 1 << 28; break; /* 256MB */
7877 CH_ERR(adapter, "Micron Flash Part has bad size, "
7878 "ID = %#x, Density code = %#x\n",
7885 case 0xef: { /* Winbond */
7887 * This Density -> Size decoding table is taken from Winbond
7890 density = (flashid >> 16) & 0xff;
7892 case 0x17: size = 1 << 23; break; /* 8MB */
7893 case 0x18: size = 1 << 24; break; /* 16MB */
7896 CH_ERR(adapter, "Winbond Flash Part has bad size, "
7897 "ID = %#x, Density code = %#x\n",
7905 CH_ERR(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
7910 * Store decoded Flash size and fall through into vetting code.
7912 adapter->params.sf_size = size;
7913 adapter->params.sf_nsec = size / SF_SEC_SIZE;
7917 * We should ~probably~ reject adapters with FLASHes which are too
7918 * small but we have some legacy FPGAs with small FLASHes that we'd
7919 * still like to use. So instead we emit a scary message ...
7921 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7922 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
7923 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
7928 static void set_pcie_completion_timeout(struct adapter *adapter,
7934 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7936 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7939 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7943 const struct chip_params *t4_get_chip_params(int chipid)
7945 static const struct chip_params chip_params[] = {
7949 .pm_stats_cnt = PM_NSTATS,
7950 .cng_ch_bits_log = 2,
7952 .cim_num_obq = CIM_NUM_OBQ,
7953 .mps_rplc_size = 128,
7955 .sge_fl_db = F_DBPRIO,
7956 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7961 .pm_stats_cnt = PM_NSTATS,
7962 .cng_ch_bits_log = 2,
7964 .cim_num_obq = CIM_NUM_OBQ_T5,
7965 .mps_rplc_size = 128,
7967 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7968 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7973 .pm_stats_cnt = T6_PM_NSTATS,
7974 .cng_ch_bits_log = 3,
7976 .cim_num_obq = CIM_NUM_OBQ_T5,
7977 .mps_rplc_size = 256,
7980 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7984 chipid -= CHELSIO_T4;
7985 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7988 return &chip_params[chipid];
7992 * t4_prep_adapter - prepare SW and HW for operation
7993 * @adapter: the adapter
7994 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7996 * Initialize adapter SW state for the various HW modules, set initial
7997 * values for some adapter tunables, take PHYs out of reset, and
7998 * initialize the MDIO interface.
8000 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
8006 get_pci_mode(adapter, &adapter->params.pci);
8008 pl_rev = t4_read_reg(adapter, A_PL_REV);
8009 adapter->params.chipid = G_CHIPID(pl_rev);
8010 adapter->params.rev = G_REV(pl_rev);
8011 if (adapter->params.chipid == 0) {
8012 /* T4 did not have chipid in PL_REV (T5 onwards do) */
8013 adapter->params.chipid = CHELSIO_T4;
8015 /* T4A1 chip is not supported */
8016 if (adapter->params.rev == 1) {
8017 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
8022 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
8023 if (adapter->chip_params == NULL)
8026 adapter->params.pci.vpd_cap_addr =
8027 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
8029 ret = t4_get_flash_params(adapter);
8033 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
8037 /* Cards with real ASICs have the chipid in the PCIe device id */
8038 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
8039 if (device_id >> 12 == chip_id(adapter))
8040 adapter->params.cim_la_size = CIMLA_SIZE;
8043 adapter->params.fpga = 1;
8044 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
8047 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8050 * Default port and clock for debugging in case we can't reach FW.
8052 adapter->params.nports = 1;
8053 adapter->params.portvec = 1;
8054 adapter->params.vpd.cclk = 50000;
8056 /* Set pci completion timeout value to 4 seconds. */
8057 set_pcie_completion_timeout(adapter, 0xd);
8062 * t4_shutdown_adapter - shut down adapter, host & wire
8063 * @adapter: the adapter
8065 * Perform an emergency shutdown of the adapter and stop it from
8066 * continuing any further communication on the ports or DMA to the
8067 * host. This is typically used when the adapter and/or firmware
8068 * have crashed and we want to prevent any further accidental
8069 * communication with the rest of the world. This will also force
8070 * the port Link Status to go down -- if register writes work --
8071 * which should help our peers figure out that we're down.
8073 int t4_shutdown_adapter(struct adapter *adapter)
8077 t4_intr_disable(adapter);
8078 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
8079 for_each_port(adapter, port) {
8080 u32 a_port_cfg = is_t4(adapter) ?
8081 PORT_REG(port, A_XGMAC_PORT_CFG) :
8082 T5_PORT_REG(port, A_MAC_PORT_CFG);
8084 t4_write_reg(adapter, a_port_cfg,
8085 t4_read_reg(adapter, a_port_cfg)
8086 & ~V_SIGNAL_DET(1));
8088 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
8094 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8095 * @adapter: the adapter
8096 * @qid: the Queue ID
8097 * @qtype: the Ingress or Egress type for @qid
8098 * @user: true if this request is for a user mode queue
8099 * @pbar2_qoffset: BAR2 Queue Offset
8100 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8102 * Returns the BAR2 SGE Queue Registers information associated with the
8103 * indicated Absolute Queue ID. These are passed back in return value
8104 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8105 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8107 * This may return an error which indicates that BAR2 SGE Queue
8108 * registers aren't available. If an error is not returned, then the
8109 * following values are returned:
8111 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8112 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8114 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8115 * require the "Inferred Queue ID" ability may be used. E.g. the
8116 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8117 * then these "Inferred Queue ID" register may not be used.
8119 int t4_bar2_sge_qregs(struct adapter *adapter,
8121 enum t4_bar2_qtype qtype,
8124 unsigned int *pbar2_qid)
8126 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8127 u64 bar2_page_offset, bar2_qoffset;
8128 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8130 /* T4 doesn't support BAR2 SGE Queue registers for kernel
8133 if (!user && is_t4(adapter))
8136 /* Get our SGE Page Size parameters.
8138 page_shift = adapter->params.sge.page_shift;
8139 page_size = 1 << page_shift;
8141 /* Get the right Queues per Page parameters for our Queue.
8143 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8144 ? adapter->params.sge.eq_s_qpp
8145 : adapter->params.sge.iq_s_qpp);
8146 qpp_mask = (1 << qpp_shift) - 1;
8148 /* Calculate the basics of the BAR2 SGE Queue register area:
8149 * o The BAR2 page the Queue registers will be in.
8150 * o The BAR2 Queue ID.
8151 * o The BAR2 Queue ID Offset into the BAR2 page.
8153 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
8154 bar2_qid = qid & qpp_mask;
8155 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8157 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
8158 * hardware will infer the Absolute Queue ID simply from the writes to
8159 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8160 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
8161 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8162 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8163 * from the BAR2 Page and BAR2 Queue ID.
8165 * One important censequence of this is that some BAR2 SGE registers
8166 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8167 * there. But other registers synthesize the SGE Queue ID purely
8168 * from the writes to the registers -- the Write Combined Doorbell
8169 * Buffer is a good example. These BAR2 SGE Registers are only
8170 * available for those BAR2 SGE Register areas where the SGE Absolute
8171 * Queue ID can be inferred from simple writes.
8173 bar2_qoffset = bar2_page_offset;
8174 bar2_qinferred = (bar2_qid_offset < page_size);
8175 if (bar2_qinferred) {
8176 bar2_qoffset += bar2_qid_offset;
8180 *pbar2_qoffset = bar2_qoffset;
8181 *pbar2_qid = bar2_qid;
8186 * t4_init_devlog_params - initialize adapter->params.devlog
8187 * @adap: the adapter
8188 * @fw_attach: whether we can talk to the firmware
8190 * Initialize various fields of the adapter's Firmware Device Log
8191 * Parameters structure.
8193 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
8195 struct devlog_params *dparams = &adap->params.devlog;
8197 unsigned int devlog_meminfo;
8198 struct fw_devlog_cmd devlog_cmd;
8201 /* If we're dealing with newer firmware, the Device Log Paramerters
8202 * are stored in a designated register which allows us to access the
8203 * Device Log even if we can't talk to the firmware.
8206 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
8208 unsigned int nentries, nentries128;
8210 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
8211 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
8213 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
8214 nentries = (nentries128 + 1) * 128;
8215 dparams->size = nentries * sizeof(struct fw_devlog_e);
8221 * For any failing returns ...
8223 memset(dparams, 0, sizeof *dparams);
8226 * If we can't talk to the firmware, there's really nothing we can do
8232 /* Otherwise, ask the firmware for it's Device Log Parameters.
8234 memset(&devlog_cmd, 0, sizeof devlog_cmd);
8235 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
8236 F_FW_CMD_REQUEST | F_FW_CMD_READ);
8237 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
8238 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8244 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
8245 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
8246 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
8247 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8253 * t4_init_sge_params - initialize adap->params.sge
8254 * @adapter: the adapter
8256 * Initialize various fields of the adapter's SGE Parameters structure.
8258 int t4_init_sge_params(struct adapter *adapter)
8261 struct sge_params *sp = &adapter->params.sge;
8262 unsigned i, tscale = 1;
8264 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
8265 sp->counter_val[0] = G_THRESHOLD_0(r);
8266 sp->counter_val[1] = G_THRESHOLD_1(r);
8267 sp->counter_val[2] = G_THRESHOLD_2(r);
8268 sp->counter_val[3] = G_THRESHOLD_3(r);
8270 if (chip_id(adapter) >= CHELSIO_T6) {
8271 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
8272 tscale = G_TSCALE(r);
8279 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
8280 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
8281 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
8282 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
8283 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
8284 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
8285 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
8286 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
8287 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
8289 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
8290 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
8292 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
8293 else if (is_t5(adapter))
8294 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
8296 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
8298 /* egress queues: log2 of # of doorbells per BAR2 page */
8299 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
8300 r >>= S_QUEUESPERPAGEPF0 +
8301 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
8302 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
8304 /* ingress queues: log2 of # of doorbells per BAR2 page */
8305 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
8306 r >>= S_QUEUESPERPAGEPF0 +
8307 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
8308 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
8310 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
8311 r >>= S_HOSTPAGESIZEPF0 +
8312 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
8313 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
8315 r = t4_read_reg(adapter, A_SGE_CONTROL);
8316 sp->sge_control = r;
8317 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
8318 sp->fl_pktshift = G_PKTSHIFT(r);
8319 if (chip_id(adapter) <= CHELSIO_T5) {
8320 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
8321 X_INGPADBOUNDARY_SHIFT);
8323 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
8324 X_T6_INGPADBOUNDARY_SHIFT);
8327 sp->pack_boundary = sp->pad_boundary;
8329 r = t4_read_reg(adapter, A_SGE_CONTROL2);
8330 if (G_INGPACKBOUNDARY(r) == 0)
8331 sp->pack_boundary = 16;
8333 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
8335 for (i = 0; i < SGE_FLBUF_SIZES; i++)
8336 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
8337 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
8343 * Read and cache the adapter's compressed filter mode and ingress config.
8345 static void read_filter_mode_and_ingress_config(struct adapter *adap,
8348 struct tp_params *tpp = &adap->params.tp;
8350 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
8352 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
8356 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8357 * shift positions of several elements of the Compressed Filter Tuple
8358 * for this adapter which we need frequently ...
8360 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
8361 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
8362 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
8363 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
8364 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
8365 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
8366 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
8367 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
8368 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
8369 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
8372 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8373 * represents the presence of an Outer VLAN instead of a VNIC ID.
8375 if ((tpp->ingress_config & F_VNIC) == 0)
8376 tpp->vnic_shift = -1;
8380 * t4_init_tp_params - initialize adap->params.tp
8381 * @adap: the adapter
8383 * Initialize various fields of the adapter's TP Parameters structure.
8385 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
8389 struct tp_params *tpp = &adap->params.tp;
8391 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
8392 tpp->tre = G_TIMERRESOLUTION(v);
8393 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
8395 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8396 for (chan = 0; chan < MAX_NCHAN; chan++)
8397 tpp->tx_modq[chan] = chan;
8399 read_filter_mode_and_ingress_config(adap, sleep_ok);
8402 * Cache a mask of the bits that represent the error vector portion of
8403 * rx_pkt.err_vec. T6+ can use a compressed error vector to make room
8404 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
8406 tpp->err_vec_mask = htobe16(0xffff);
8407 if (chip_id(adap) > CHELSIO_T5) {
8408 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
8409 if (v & F_CRXPKTENC) {
8411 htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
8419 * t4_filter_field_shift - calculate filter field shift
8420 * @adap: the adapter
8421 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8423 * Return the shift position of a filter field within the Compressed
8424 * Filter Tuple. The filter field is specified via its selection bit
8425 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
8427 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8429 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8433 if ((filter_mode & filter_sel) == 0)
8436 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8437 switch (filter_mode & sel) {
8439 field_shift += W_FT_FCOE;
8442 field_shift += W_FT_PORT;
8445 field_shift += W_FT_VNIC_ID;
8448 field_shift += W_FT_VLAN;
8451 field_shift += W_FT_TOS;
8454 field_shift += W_FT_PROTOCOL;
8457 field_shift += W_FT_ETHERTYPE;
8460 field_shift += W_FT_MACMATCH;
8463 field_shift += W_FT_MPSHITTYPE;
8465 case F_FRAGMENTATION:
8466 field_shift += W_FT_FRAGMENTATION;
8473 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
8478 struct port_info *p = adap2pinfo(adap, port_id);
8481 for (i = 0, j = -1; i <= p->port_id; i++) {
8484 } while ((adap->params.portvec & (1 << j)) == 0);
8487 if (!(adap->flags & IS_VF) ||
8488 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
8489 t4_update_port_info(p);
8492 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
8496 p->vi[0].viid = ret;
8497 if (chip_id(adap) <= CHELSIO_T5)
8498 p->vi[0].smt_idx = (ret & 0x7f) << 1;
8500 p->vi[0].smt_idx = (ret & 0x7f);
8502 p->mps_bg_map = t4_get_mps_bg_map(adap, j);
8503 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
8505 p->vi[0].rss_size = rss_size;
8506 t4_os_set_hw_addr(p, addr);
8508 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8509 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
8510 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
8511 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
8513 p->vi[0].rss_base = 0xffff;
8515 /* MPASS((val >> 16) == rss_size); */
8516 p->vi[0].rss_base = val & 0xffff;
8523 * t4_read_cimq_cfg - read CIM queue configuration
8524 * @adap: the adapter
8525 * @base: holds the queue base addresses in bytes
8526 * @size: holds the queue sizes in bytes
8527 * @thres: holds the queue full thresholds in bytes
8529 * Returns the current configuration of the CIM queues, starting with
8530 * the IBQs, then the OBQs.
8532 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8535 int cim_num_obq = adap->chip_params->cim_num_obq;
8537 for (i = 0; i < CIM_NUM_IBQ; i++) {
8538 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8540 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8541 /* value is in 256-byte units */
8542 *base++ = G_CIMQBASE(v) * 256;
8543 *size++ = G_CIMQSIZE(v) * 256;
8544 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
8546 for (i = 0; i < cim_num_obq; i++) {
8547 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8549 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8550 /* value is in 256-byte units */
8551 *base++ = G_CIMQBASE(v) * 256;
8552 *size++ = G_CIMQSIZE(v) * 256;
8557 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8558 * @adap: the adapter
8559 * @qid: the queue index
8560 * @data: where to store the queue contents
8561 * @n: capacity of @data in 32-bit words
8563 * Reads the contents of the selected CIM queue starting at address 0 up
8564 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8565 * error and the number of 32-bit words actually read on success.
8567 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8569 int i, err, attempts;
8571 const unsigned int nwords = CIM_IBQ_SIZE * 4;
8573 if (qid > 5 || (n & 3))
8576 addr = qid * nwords;
8580 /* It might take 3-10ms before the IBQ debug read access is allowed.
8581 * Wait for 1 Sec with a delay of 1 usec.
8585 for (i = 0; i < n; i++, addr++) {
8586 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8588 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8592 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
8594 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8599 * t4_read_cim_obq - read the contents of a CIM outbound queue
8600 * @adap: the adapter
8601 * @qid: the queue index
8602 * @data: where to store the queue contents
8603 * @n: capacity of @data in 32-bit words
8605 * Reads the contents of the selected CIM queue starting at address 0 up
8606 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8607 * error and the number of 32-bit words actually read on success.
8609 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8612 unsigned int addr, v, nwords;
8613 int cim_num_obq = adap->chip_params->cim_num_obq;
8615 if ((qid > (cim_num_obq - 1)) || (n & 3))
8618 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8619 V_QUENUMSELECT(qid));
8620 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8622 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
8623 nwords = G_CIMQSIZE(v) * 64; /* same */
8627 for (i = 0; i < n; i++, addr++) {
8628 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8630 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8634 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
8636 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
8642 CIM_CTL_BASE = 0x2000,
8643 CIM_PBT_ADDR_BASE = 0x2800,
8644 CIM_PBT_LRF_BASE = 0x3000,
8645 CIM_PBT_DATA_BASE = 0x3800
8649 * t4_cim_read - read a block from CIM internal address space
8650 * @adap: the adapter
8651 * @addr: the start address within the CIM address space
8652 * @n: number of words to read
8653 * @valp: where to store the result
8655 * Reads a block of 4-byte words from the CIM intenal address space.
8657 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8662 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8665 for ( ; !ret && n--; addr += 4) {
8666 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8667 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8670 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8676 * t4_cim_write - write a block into CIM internal address space
8677 * @adap: the adapter
8678 * @addr: the start address within the CIM address space
8679 * @n: number of words to write
8680 * @valp: set of values to write
8682 * Writes a block of 4-byte words into the CIM intenal address space.
8684 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8685 const unsigned int *valp)
8689 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8692 for ( ; !ret && n--; addr += 4) {
8693 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8694 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8695 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8701 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8704 return t4_cim_write(adap, addr, 1, &val);
8708 * t4_cim_ctl_read - read a block from CIM control region
8709 * @adap: the adapter
8710 * @addr: the start address within the CIM control region
8711 * @n: number of words to read
8712 * @valp: where to store the result
8714 * Reads a block of 4-byte words from the CIM control region.
8716 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8719 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8723 * t4_cim_read_la - read CIM LA capture buffer
8724 * @adap: the adapter
8725 * @la_buf: where to store the LA data
8726 * @wrptr: the HW write pointer within the capture buffer
8728 * Reads the contents of the CIM LA buffer with the most recent entry at
8729 * the end of the returned data and with the entry at @wrptr first.
8730 * We try to leave the LA in the running state we find it in.
8732 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8735 unsigned int cfg, val, idx;
8737 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8741 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8742 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8747 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8751 idx = G_UPDBGLAWRPTR(val);
8755 for (i = 0; i < adap->params.cim_la_size; i++) {
8756 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8757 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8760 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8763 if (val & F_UPDBGLARDEN) {
8767 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8771 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8772 idx = (idx + 1) & M_UPDBGLARDPTR;
8774 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8775 * identify the 32-bit portion of the full 312-bit data
8778 while ((idx & 0xf) > 9)
8779 idx = (idx + 1) % M_UPDBGLARDPTR;
8782 if (cfg & F_UPDBGLAEN) {
8783 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8784 cfg & ~F_UPDBGLARDEN);
8792 * t4_tp_read_la - read TP LA capture buffer
8793 * @adap: the adapter
8794 * @la_buf: where to store the LA data
8795 * @wrptr: the HW write pointer within the capture buffer
8797 * Reads the contents of the TP LA buffer with the most recent entry at
8798 * the end of the returned data and with the entry at @wrptr first.
8799 * We leave the LA in the running state we find it in.
8801 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8803 bool last_incomplete;
8804 unsigned int i, cfg, val, idx;
8806 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8807 if (cfg & F_DBGLAENABLE) /* freeze LA */
8808 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8809 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8811 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8812 idx = G_DBGLAWPTR(val);
8813 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8814 if (last_incomplete)
8815 idx = (idx + 1) & M_DBGLARPTR;
8820 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8821 val |= adap->params.tp.la_mask;
8823 for (i = 0; i < TPLA_SIZE; i++) {
8824 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8825 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8826 idx = (idx + 1) & M_DBGLARPTR;
8829 /* Wipe out last entry if it isn't valid */
8830 if (last_incomplete)
8831 la_buf[TPLA_SIZE - 1] = ~0ULL;
8833 if (cfg & F_DBGLAENABLE) /* restore running state */
8834 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8835 cfg | adap->params.tp.la_mask);
8839 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8840 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8841 * state for more than the Warning Threshold then we'll issue a warning about
8842 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8843 * appears to be hung every Warning Repeat second till the situation clears.
8844 * If the situation clears, we'll note that as well.
8846 #define SGE_IDMA_WARN_THRESH 1
8847 #define SGE_IDMA_WARN_REPEAT 300
8850 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8851 * @adapter: the adapter
8852 * @idma: the adapter IDMA Monitor state
8854 * Initialize the state of an SGE Ingress DMA Monitor.
8856 void t4_idma_monitor_init(struct adapter *adapter,
8857 struct sge_idma_monitor_state *idma)
8859 /* Initialize the state variables for detecting an SGE Ingress DMA
8860 * hang. The SGE has internal counters which count up on each clock
8861 * tick whenever the SGE finds its Ingress DMA State Engines in the
8862 * same state they were on the previous clock tick. The clock used is
8863 * the Core Clock so we have a limit on the maximum "time" they can
8864 * record; typically a very small number of seconds. For instance,
8865 * with a 600MHz Core Clock, we can only count up to a bit more than
8866 * 7s. So we'll synthesize a larger counter in order to not run the
8867 * risk of having the "timers" overflow and give us the flexibility to
8868 * maintain a Hung SGE State Machine of our own which operates across
8869 * a longer time frame.
8871 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8872 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8876 * t4_idma_monitor - monitor SGE Ingress DMA state
8877 * @adapter: the adapter
8878 * @idma: the adapter IDMA Monitor state
8879 * @hz: number of ticks/second
8880 * @ticks: number of ticks since the last IDMA Monitor call
8882 void t4_idma_monitor(struct adapter *adapter,
8883 struct sge_idma_monitor_state *idma,
8886 int i, idma_same_state_cnt[2];
8888 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8889 * are counters inside the SGE which count up on each clock when the
8890 * SGE finds its Ingress DMA State Engines in the same states they
8891 * were in the previous clock. The counters will peg out at
8892 * 0xffffffff without wrapping around so once they pass the 1s
8893 * threshold they'll stay above that till the IDMA state changes.
8895 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8896 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8897 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8899 for (i = 0; i < 2; i++) {
8900 u32 debug0, debug11;
8902 /* If the Ingress DMA Same State Counter ("timer") is less
8903 * than 1s, then we can reset our synthesized Stall Timer and
8904 * continue. If we have previously emitted warnings about a
8905 * potential stalled Ingress Queue, issue a note indicating
8906 * that the Ingress Queue has resumed forward progress.
8908 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8909 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8910 CH_WARN(adapter, "SGE idma%d, queue %u, "
8911 "resumed after %d seconds\n",
8912 i, idma->idma_qid[i],
8913 idma->idma_stalled[i]/hz);
8914 idma->idma_stalled[i] = 0;
8918 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8919 * domain. The first time we get here it'll be because we
8920 * passed the 1s Threshold; each additional time it'll be
8921 * because the RX Timer Callback is being fired on its regular
8924 * If the stall is below our Potential Hung Ingress Queue
8925 * Warning Threshold, continue.
8927 if (idma->idma_stalled[i] == 0) {
8928 idma->idma_stalled[i] = hz;
8929 idma->idma_warn[i] = 0;
8931 idma->idma_stalled[i] += ticks;
8932 idma->idma_warn[i] -= ticks;
8935 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8938 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8940 if (idma->idma_warn[i] > 0)
8942 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8944 /* Read and save the SGE IDMA State and Queue ID information.
8945 * We do this every time in case it changes across time ...
8946 * can't be too careful ...
8948 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8949 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8950 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8952 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8953 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8954 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8956 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8957 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8958 i, idma->idma_qid[i], idma->idma_state[i],
8959 idma->idma_stalled[i]/hz,
8961 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8966 * t4_read_pace_tbl - read the pace table
8967 * @adap: the adapter
8968 * @pace_vals: holds the returned values
8970 * Returns the values of TP's pace table in microseconds.
8972 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8976 for (i = 0; i < NTX_SCHED; i++) {
8977 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8978 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8979 pace_vals[i] = dack_ticks_to_usec(adap, v);
8984 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8985 * @adap: the adapter
8986 * @sched: the scheduler index
8987 * @kbps: the byte rate in Kbps
8988 * @ipg: the interpacket delay in tenths of nanoseconds
8990 * Return the current configuration of a HW Tx scheduler.
8992 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8993 unsigned int *ipg, bool sleep_ok)
8995 unsigned int v, addr, bpt, cpt;
8998 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8999 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9002 bpt = (v >> 8) & 0xff;
9005 *kbps = 0; /* scheduler disabled */
9007 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9008 *kbps = (v * bpt) / 125;
9012 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
9013 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9017 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9022 * t4_load_cfg - download config file
9023 * @adap: the adapter
9024 * @cfg_data: the cfg text file to write
9025 * @size: text file size
9027 * Write the supplied config text file to the card's serial flash.
9029 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9031 int ret, i, n, cfg_addr;
9033 unsigned int flash_cfg_start_sec;
9034 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9036 cfg_addr = t4_flash_cfg_addr(adap);
9041 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9043 if (size > FLASH_CFG_MAX_SIZE) {
9044 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
9045 FLASH_CFG_MAX_SIZE);
9049 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9051 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9052 flash_cfg_start_sec + i - 1);
9054 * If size == 0 then we're simply erasing the FLASH sectors associated
9055 * with the on-adapter Firmware Configuration File.
9057 if (ret || size == 0)
9060 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9061 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9062 if ( (size - i) < SF_PAGE_SIZE)
9066 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
9070 addr += SF_PAGE_SIZE;
9071 cfg_data += SF_PAGE_SIZE;
9076 CH_ERR(adap, "config file %s failed %d\n",
9077 (size == 0 ? "clear" : "download"), ret);
9082 * t5_fw_init_extern_mem - initialize the external memory
9083 * @adap: the adapter
9085 * Initializes the external memory on T5.
9087 int t5_fw_init_extern_mem(struct adapter *adap)
9089 u32 params[1], val[1];
9095 val[0] = 0xff; /* Initialize all MCs */
9096 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
9097 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
9098 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
9099 FW_CMD_MAX_TIMEOUT);
9104 /* BIOS boot headers */
9105 typedef struct pci_expansion_rom_header {
9106 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
9107 u8 reserved[22]; /* Reserved per processor Architecture data */
9108 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
9109 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
9111 /* Legacy PCI Expansion ROM Header */
9112 typedef struct legacy_pci_expansion_rom_header {
9113 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
9114 u8 size512; /* Current Image Size in units of 512 bytes */
9115 u8 initentry_point[4];
9116 u8 cksum; /* Checksum computed on the entire Image */
9117 u8 reserved[16]; /* Reserved */
9118 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
9119 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
9121 /* EFI PCI Expansion ROM Header */
9122 typedef struct efi_pci_expansion_rom_header {
9123 u8 signature[2]; // ROM signature. The value 0xaa55
9124 u8 initialization_size[2]; /* Units 512. Includes this header */
9125 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
9126 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
9127 u8 efi_machine_type[2]; /* Machine type from EFI image header */
9128 u8 compression_type[2]; /* Compression type. */
9130 * Compression type definition
9133 * 0x2-0xFFFF: Reserved
9135 u8 reserved[8]; /* Reserved */
9136 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
9137 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
9138 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
9140 /* PCI Data Structure Format */
9141 typedef struct pcir_data_structure { /* PCI Data Structure */
9142 u8 signature[4]; /* Signature. The string "PCIR" */
9143 u8 vendor_id[2]; /* Vendor Identification */
9144 u8 device_id[2]; /* Device Identification */
9145 u8 vital_product[2]; /* Pointer to Vital Product Data */
9146 u8 length[2]; /* PCIR Data Structure Length */
9147 u8 revision; /* PCIR Data Structure Revision */
9148 u8 class_code[3]; /* Class Code */
9149 u8 image_length[2]; /* Image Length. Multiple of 512B */
9150 u8 code_revision[2]; /* Revision Level of Code/Data */
9151 u8 code_type; /* Code Type. */
9153 * PCI Expansion ROM Code Types
9154 * 0x00: Intel IA-32, PC-AT compatible. Legacy
9155 * 0x01: Open Firmware standard for PCI. FCODE
9156 * 0x02: Hewlett-Packard PA RISC. HP reserved
9157 * 0x03: EFI Image. EFI
9158 * 0x04-0xFF: Reserved.
9160 u8 indicator; /* Indicator. Identifies the last image in the ROM */
9161 u8 reserved[2]; /* Reserved */
9162 } pcir_data_t; /* PCI__DATA_STRUCTURE */
9164 /* BOOT constants */
9166 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
9167 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
9168 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
9169 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
9170 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
9171 VENDOR_ID = 0x1425, /* Vendor ID */
9172 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
9176 * modify_device_id - Modifies the device ID of the Boot BIOS image
9177 * @adatper: the device ID to write.
9178 * @boot_data: the boot image to modify.
9180 * Write the supplied device ID to the boot BIOS image.
9182 static void modify_device_id(int device_id, u8 *boot_data)
9184 legacy_pci_exp_rom_header_t *header;
9185 pcir_data_t *pcir_header;
9189 * Loop through all chained images and change the device ID's
9192 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
9193 pcir_header = (pcir_data_t *) &boot_data[cur_header +
9194 le16_to_cpu(*(u16*)header->pcir_offset)];
9197 * Only modify the Device ID if code type is Legacy or HP.
9198 * 0x00: Okay to modify
9199 * 0x01: FCODE. Do not be modify
9200 * 0x03: Okay to modify
9201 * 0x04-0xFF: Do not modify
9203 if (pcir_header->code_type == 0x00) {
9208 * Modify Device ID to match current adatper
9210 *(u16*) pcir_header->device_id = device_id;
9213 * Set checksum temporarily to 0.
9214 * We will recalculate it later.
9216 header->cksum = 0x0;
9219 * Calculate and update checksum
9221 for (i = 0; i < (header->size512 * 512); i++)
9222 csum += (u8)boot_data[cur_header + i];
9225 * Invert summed value to create the checksum
9226 * Writing new checksum value directly to the boot data
9228 boot_data[cur_header + 7] = -csum;
9230 } else if (pcir_header->code_type == 0x03) {
9233 * Modify Device ID to match current adatper
9235 *(u16*) pcir_header->device_id = device_id;
9241 * Check indicator element to identify if this is the last
9244 if (pcir_header->indicator & 0x80)
9248 * Move header pointer up to the next image in the ROM.
9250 cur_header += header->size512 * 512;
9255 * t4_load_boot - download boot flash
9256 * @adapter: the adapter
9257 * @boot_data: the boot image to write
9258 * @boot_addr: offset in flash to write boot_data
9261 * Write the supplied boot image to the card's serial flash.
9262 * The boot image has the following sections: a 28-byte header and the
9265 int t4_load_boot(struct adapter *adap, u8 *boot_data,
9266 unsigned int boot_addr, unsigned int size)
9268 pci_exp_rom_header_t *header;
9270 pcir_data_t *pcir_header;
9274 unsigned int boot_sector = (boot_addr * 1024 );
9275 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9278 * Make sure the boot image does not encroach on the firmware region
9280 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
9281 CH_ERR(adap, "boot image encroaching on firmware region\n");
9286 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
9287 * and Boot configuration data sections. These 3 boot sections span
9288 * sectors 0 to 7 in flash and live right before the FW image location.
9290 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
9292 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
9293 (boot_sector >> 16) + i - 1);
9296 * If size == 0 then we're simply erasing the FLASH sectors associated
9297 * with the on-adapter option ROM file
9299 if (ret || (size == 0))
9302 /* Get boot header */
9303 header = (pci_exp_rom_header_t *)boot_data;
9304 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
9305 /* PCIR Data Structure */
9306 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
9309 * Perform some primitive sanity testing to avoid accidentally
9310 * writing garbage over the boot sectors. We ought to check for
9311 * more but it's not worth it for now ...
9313 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
9314 CH_ERR(adap, "boot image too small/large\n");
9318 #ifndef CHELSIO_T4_DIAGS
9320 * Check BOOT ROM header signature
9322 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
9323 CH_ERR(adap, "Boot image missing signature\n");
9328 * Check PCI header signature
9330 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
9331 CH_ERR(adap, "PCI header missing signature\n");
9336 * Check Vendor ID matches Chelsio ID
9338 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
9339 CH_ERR(adap, "Vendor ID missing signature\n");
9345 * Retrieve adapter's device ID
9347 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
9348 /* Want to deal with PF 0 so I strip off PF 4 indicator */
9349 device_id = device_id & 0xf0ff;
9352 * Check PCIE Device ID
9354 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
9356 * Change the device ID in the Boot BIOS image to match
9357 * the Device ID of the current adapter.
9359 modify_device_id(device_id, boot_data);
9363 * Skip over the first SF_PAGE_SIZE worth of data and write it after
9364 * we finish copying the rest of the boot image. This will ensure
9365 * that the BIOS boot header will only be written if the boot image
9366 * was written in full.
9369 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
9370 addr += SF_PAGE_SIZE;
9371 boot_data += SF_PAGE_SIZE;
9372 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
9377 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
9378 (const u8 *)header, 0);
9382 CH_ERR(adap, "boot image download failed, error %d\n", ret);
9387 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9388 * @adapter: the adapter
9390 * Return the address within the flash where the OptionROM Configuration
9391 * is stored, or an error if the device FLASH is too small to contain
9392 * a OptionROM Configuration.
9394 static int t4_flash_bootcfg_addr(struct adapter *adapter)
9397 * If the device FLASH isn't large enough to hold a Firmware
9398 * Configuration File, return an error.
9400 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9403 return FLASH_BOOTCFG_START;
9406 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
9408 int ret, i, n, cfg_addr;
9410 unsigned int flash_cfg_start_sec;
9411 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9413 cfg_addr = t4_flash_bootcfg_addr(adap);
9418 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9420 if (size > FLASH_BOOTCFG_MAX_SIZE) {
9421 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9422 FLASH_BOOTCFG_MAX_SIZE);
9426 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9428 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9429 flash_cfg_start_sec + i - 1);
9432 * If size == 0 then we're simply erasing the FLASH sectors associated
9433 * with the on-adapter OptionROM Configuration File.
9435 if (ret || size == 0)
9438 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9439 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9440 if ( (size - i) < SF_PAGE_SIZE)
9444 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9448 addr += SF_PAGE_SIZE;
9449 cfg_data += SF_PAGE_SIZE;
9454 CH_ERR(adap, "boot config data %s failed %d\n",
9455 (size == 0 ? "clear" : "download"), ret);
9460 * t4_set_filter_mode - configure the optional components of filter tuples
9461 * @adap: the adapter
9462 * @mode_map: a bitmap selcting which optional filter components to enable
9463 * @sleep_ok: if true we may sleep while awaiting command completion
9465 * Sets the filter mode by selecting the optional components to enable
9466 * in filter tuples. Returns 0 on success and a negative error if the
9467 * requested mode needs more bits than are available for optional
9470 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
9473 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
9477 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9478 if (mode_map & (1 << i))
9480 if (nbits > FILTER_OPT_LEN)
9482 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
9483 read_filter_mode_and_ingress_config(adap, sleep_ok);
9489 * t4_clr_port_stats - clear port statistics
9490 * @adap: the adapter
9491 * @idx: the port index
9493 * Clear HW statistics for the given port.
9495 void t4_clr_port_stats(struct adapter *adap, int idx)
9498 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
9502 port_base_addr = PORT_BASE(idx);
9504 port_base_addr = T5_PORT_BASE(idx);
9506 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9507 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9508 t4_write_reg(adap, port_base_addr + i, 0);
9509 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9510 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9511 t4_write_reg(adap, port_base_addr + i, 0);
9512 for (i = 0; i < 4; i++)
9513 if (bgmap & (1 << i)) {
9515 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9517 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9522 * t4_i2c_rd - read I2C data from adapter
9523 * @adap: the adapter
9524 * @port: Port number if per-port device; <0 if not
9525 * @devid: per-port device ID or absolute device ID
9526 * @offset: byte offset into device I2C space
9527 * @len: byte length of I2C space data
9528 * @buf: buffer in which to return I2C data
9530 * Reads the I2C data from the indicated device and location.
9532 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9533 int port, unsigned int devid,
9534 unsigned int offset, unsigned int len,
9538 struct fw_ldst_cmd ldst;
9544 len > sizeof ldst.u.i2c.data)
9547 memset(&ldst, 0, sizeof ldst);
9548 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9549 ldst.op_to_addrspace =
9550 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9554 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9555 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9556 ldst.u.i2c.did = devid;
9557 ldst.u.i2c.boffset = offset;
9558 ldst.u.i2c.blen = len;
9559 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9561 memcpy(buf, ldst.u.i2c.data, len);
9566 * t4_i2c_wr - write I2C data to adapter
9567 * @adap: the adapter
9568 * @port: Port number if per-port device; <0 if not
9569 * @devid: per-port device ID or absolute device ID
9570 * @offset: byte offset into device I2C space
9571 * @len: byte length of I2C space data
9572 * @buf: buffer containing new I2C data
9574 * Write the I2C data to the indicated device and location.
9576 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9577 int port, unsigned int devid,
9578 unsigned int offset, unsigned int len,
9582 struct fw_ldst_cmd ldst;
9587 len > sizeof ldst.u.i2c.data)
9590 memset(&ldst, 0, sizeof ldst);
9591 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9592 ldst.op_to_addrspace =
9593 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9597 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9598 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9599 ldst.u.i2c.did = devid;
9600 ldst.u.i2c.boffset = offset;
9601 ldst.u.i2c.blen = len;
9602 memcpy(ldst.u.i2c.data, buf, len);
9603 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9607 * t4_sge_ctxt_rd - read an SGE context through FW
9608 * @adap: the adapter
9609 * @mbox: mailbox to use for the FW command
9610 * @cid: the context id
9611 * @ctype: the context type
9612 * @data: where to store the context data
9614 * Issues a FW command through the given mailbox to read an SGE context.
9616 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9617 enum ctxt_type ctype, u32 *data)
9620 struct fw_ldst_cmd c;
9622 if (ctype == CTXT_EGRESS)
9623 ret = FW_LDST_ADDRSPC_SGE_EGRC;
9624 else if (ctype == CTXT_INGRESS)
9625 ret = FW_LDST_ADDRSPC_SGE_INGC;
9626 else if (ctype == CTXT_FLM)
9627 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9629 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9631 memset(&c, 0, sizeof(c));
9632 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9633 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9634 V_FW_LDST_CMD_ADDRSPACE(ret));
9635 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9636 c.u.idctxt.physid = cpu_to_be32(cid);
9638 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9640 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9641 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9642 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9643 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9644 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9645 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9651 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9652 * @adap: the adapter
9653 * @cid: the context id
9654 * @ctype: the context type
9655 * @data: where to store the context data
9657 * Reads an SGE context directly, bypassing FW. This is only for
9658 * debugging when FW is unavailable.
9660 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9665 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9666 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9668 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9669 *data++ = t4_read_reg(adap, i);
9673 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9676 struct fw_sched_cmd cmd;
9678 memset(&cmd, 0, sizeof(cmd));
9679 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9682 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9684 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9685 cmd.u.config.type = type;
9686 cmd.u.config.minmaxen = minmaxen;
9688 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9692 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9693 int rateunit, int ratemode, int channel, int cl,
9694 int minrate, int maxrate, int weight, int pktsize,
9697 struct fw_sched_cmd cmd;
9699 memset(&cmd, 0, sizeof(cmd));
9700 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9703 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9705 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9706 cmd.u.params.type = type;
9707 cmd.u.params.level = level;
9708 cmd.u.params.mode = mode;
9709 cmd.u.params.ch = channel;
9710 cmd.u.params.cl = cl;
9711 cmd.u.params.unit = rateunit;
9712 cmd.u.params.rate = ratemode;
9713 cmd.u.params.min = cpu_to_be32(minrate);
9714 cmd.u.params.max = cpu_to_be32(maxrate);
9715 cmd.u.params.weight = cpu_to_be16(weight);
9716 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9718 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9722 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
9723 unsigned int maxrate, int sleep_ok)
9725 struct fw_sched_cmd cmd;
9727 memset(&cmd, 0, sizeof(cmd));
9728 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9731 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9733 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9734 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9735 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
9736 cmd.u.params.ch = channel;
9737 cmd.u.params.rate = ratemode; /* REL or ABS */
9738 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
9740 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9744 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
9745 int weight, int sleep_ok)
9747 struct fw_sched_cmd cmd;
9749 if (weight < 0 || weight > 100)
9752 memset(&cmd, 0, sizeof(cmd));
9753 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9756 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9758 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9759 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9760 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
9761 cmd.u.params.ch = channel;
9762 cmd.u.params.cl = cl;
9763 cmd.u.params.weight = cpu_to_be16(weight);
9765 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9769 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
9770 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
9772 struct fw_sched_cmd cmd;
9774 memset(&cmd, 0, sizeof(cmd));
9775 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9778 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9780 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9781 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9782 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
9783 cmd.u.params.mode = mode;
9784 cmd.u.params.ch = channel;
9785 cmd.u.params.cl = cl;
9786 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
9787 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
9788 cmd.u.params.max = cpu_to_be32(maxrate);
9789 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9791 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9796 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9797 * @adapter: the adapter
9798 * @mbox: mailbox to use for the FW command
9799 * @pf: the PF owning the queue
9800 * @vf: the VF owning the queue
9801 * @timeout: watchdog timeout in ms
9802 * @action: watchdog timer / action
9804 * There are separate watchdog timers for each possible watchdog
9805 * action. Configure one of the watchdog timers by setting a non-zero
9806 * timeout. Disable a watchdog timer by using a timeout of zero.
9808 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9809 unsigned int pf, unsigned int vf,
9810 unsigned int timeout, unsigned int action)
9812 struct fw_watchdog_cmd wdog;
9816 * The watchdog command expects a timeout in units of 10ms so we need
9817 * to convert it here (via rounding) and force a minimum of one 10ms
9818 * "tick" if the timeout is non-zero but the conversion results in 0
9821 ticks = (timeout + 5)/10;
9822 if (timeout && !ticks)
9825 memset(&wdog, 0, sizeof wdog);
9826 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9829 V_FW_PARAMS_CMD_PFN(pf) |
9830 V_FW_PARAMS_CMD_VFN(vf));
9831 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9832 wdog.timeout = cpu_to_be32(ticks);
9833 wdog.action = cpu_to_be32(action);
9835 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9838 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9840 struct fw_devlog_cmd devlog_cmd;
9843 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9844 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9845 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9846 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9847 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9848 sizeof(devlog_cmd), &devlog_cmd);
9852 *level = devlog_cmd.level;
9856 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9858 struct fw_devlog_cmd devlog_cmd;
9860 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9861 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9864 devlog_cmd.level = level;
9865 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9866 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9867 sizeof(devlog_cmd), &devlog_cmd);