2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
39 #include "t4_regs_values.h"
40 #include "firmware/t4fw_interface.h"
43 #define msleep(x) do { \
47 pause("t4hw", (x) * hz / 1000); \
51 * t4_wait_op_done_val - wait until an operation is completed
52 * @adapter: the adapter performing the operation
53 * @reg: the register to check for completion
54 * @mask: a single-bit field within @reg that indicates completion
55 * @polarity: the value of the field when the operation is completed
56 * @attempts: number of check iterations
57 * @delay: delay in usecs between iterations
58 * @valp: where to store the value of the register at completion time
60 * Wait until an operation is completed by checking a bit in a register
61 * up to @attempts times. If @valp is not NULL the value of the register
62 * at the time it indicated completion is stored there. Returns 0 if the
63 * operation completes and -EAGAIN otherwise.
65 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
66 int polarity, int attempts, int delay, u32 *valp)
69 u32 val = t4_read_reg(adapter, reg);
71 if (!!(val & mask) == polarity) {
83 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
84 int polarity, int attempts, int delay)
86 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
91 * t4_set_reg_field - set a register field to a value
92 * @adapter: the adapter to program
93 * @addr: the register address
94 * @mask: specifies the portion of the register to modify
95 * @val: the new value for the register field
97 * Sets a register field specified by the supplied mask to the
100 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
103 u32 v = t4_read_reg(adapter, addr) & ~mask;
105 t4_write_reg(adapter, addr, v | val);
106 (void) t4_read_reg(adapter, addr); /* flush */
110 * t4_read_indirect - read indirectly addressed registers
112 * @addr_reg: register holding the indirect address
113 * @data_reg: register holding the value of the indirect register
114 * @vals: where the read register values are stored
115 * @nregs: how many indirect registers to read
116 * @start_idx: index of first indirect register to read
118 * Reads registers that are accessed indirectly through an address/data
121 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
122 unsigned int data_reg, u32 *vals,
123 unsigned int nregs, unsigned int start_idx)
126 t4_write_reg(adap, addr_reg, start_idx);
127 *vals++ = t4_read_reg(adap, data_reg);
133 * t4_write_indirect - write indirectly addressed registers
135 * @addr_reg: register holding the indirect addresses
136 * @data_reg: register holding the value for the indirect registers
137 * @vals: values to write
138 * @nregs: how many indirect registers to write
139 * @start_idx: address of first indirect register to write
141 * Writes a sequential block of registers that are accessed indirectly
142 * through an address/data register pair.
144 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
145 unsigned int data_reg, const u32 *vals,
146 unsigned int nregs, unsigned int start_idx)
149 t4_write_reg(adap, addr_reg, start_idx++);
150 t4_write_reg(adap, data_reg, *vals++);
155 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
156 * mechanism. This guarantees that we get the real value even if we're
157 * operating within a Virtual Machine and the Hypervisor is trapping our
158 * Configuration Space accesses.
160 * N.B. This routine should only be used as a last resort: the firmware uses
161 * the backdoor registers on a regular basis and we can end up
162 * conflicting with it's uses!
164 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
166 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
169 if (chip_id(adap) <= CHELSIO_T5)
177 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
178 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
181 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
182 * Configuration Space read. (None of the other fields matter when
183 * F_ENABLE is 0 so a simple register write is easier than a
184 * read-modify-write via t4_set_reg_field().)
186 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
192 * t4_report_fw_error - report firmware error
195 * The adapter firmware can indicate error conditions to the host.
196 * If the firmware has indicated an error, print out the reason for
197 * the firmware error.
199 static void t4_report_fw_error(struct adapter *adap)
201 static const char *const reason[] = {
202 "Crash", /* PCIE_FW_EVAL_CRASH */
203 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
204 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
205 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
206 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
207 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
208 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
209 "Reserved", /* reserved */
213 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
214 if (pcie_fw & F_PCIE_FW_ERR) {
215 adap->flags &= ~FW_OK;
216 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
217 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
218 if (pcie_fw != 0xffffffff)
219 t4_os_dump_devlog(adap);
224 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
226 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
229 for ( ; nflit; nflit--, mbox_addr += 8)
230 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
234 * Handle a FW assertion reported in a mailbox.
236 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
239 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
240 asrt->u.assert.filename_0_7,
241 be32_to_cpu(asrt->u.assert.line),
242 be32_to_cpu(asrt->u.assert.x),
243 be32_to_cpu(asrt->u.assert.y));
246 struct port_tx_state {
252 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
254 uint32_t rx_pause_reg, tx_frames_reg;
257 tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
258 rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
260 tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
261 rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
264 tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
265 tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
269 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
274 read_tx_state_one(sc, i, &tx_state[i]);
278 check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
280 uint32_t port_ctl_reg;
281 uint64_t tx_frames, rx_pause;
284 for_each_port(sc, i) {
285 rx_pause = tx_state[i].rx_pause;
286 tx_frames = tx_state[i].tx_frames;
287 read_tx_state_one(sc, i, &tx_state[i]); /* update */
290 port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
292 port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
293 if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
294 rx_pause != tx_state[i].rx_pause &&
295 tx_frames == tx_state[i].tx_frames) {
296 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
298 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
303 #define X_CIM_PF_NOACCESS 0xeeeeeeee
305 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
307 * @mbox: index of the mailbox to use
308 * @cmd: the command to write
309 * @size: command length in bytes
310 * @rpl: where to optionally store the reply
311 * @sleep_ok: if true we may sleep while awaiting command completion
312 * @timeout: time to wait for command to finish before timing out
313 * (negative implies @sleep_ok=false)
315 * Sends the given command to FW through the selected mailbox and waits
316 * for the FW to execute the command. If @rpl is not %NULL it is used to
317 * store the FW's reply to the command. The command and its optional
318 * reply are of the same length. Some FW commands like RESET and
319 * INITIALIZE can take a considerable amount of time to execute.
320 * @sleep_ok determines whether we may sleep while awaiting the response.
321 * If sleeping is allowed we use progressive backoff otherwise we spin.
322 * Note that passing in a negative @timeout is an alternate mechanism
323 * for specifying @sleep_ok=false. This is useful when a higher level
324 * interface allows for specification of @timeout but not @sleep_ok ...
326 * The return value is 0 on success or a negative errno on failure. A
327 * failure can happen either because we are not able to execute the
328 * command or FW executes it but signals an error. In the latter case
329 * the return value is the error code indicated by FW (negated).
331 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
332 int size, void *rpl, bool sleep_ok, int timeout)
335 * We delay in small increments at first in an effort to maintain
336 * responsiveness for simple, fast executing commands but then back
337 * off to larger delays to a maximum retry delay.
339 static const int delay[] = {
340 1, 1, 3, 5, 10, 10, 20, 50, 100
344 int i, ms, delay_idx, ret, next_tx_check;
345 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
346 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
348 __be64 cmd_rpl[MBOX_LEN/8];
350 struct port_tx_state tx_state[MAX_NPORTS];
352 if (adap->flags & CHK_MBOX_ACCESS)
353 ASSERT_SYNCHRONIZED_OP(adap);
355 if (size <= 0 || (size & 15) || size > MBOX_LEN)
358 if (adap->flags & IS_VF) {
360 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
362 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
363 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
367 * If we have a negative timeout, that implies that we can't sleep.
375 * Attempt to gain access to the mailbox.
377 for (i = 0; i < 4; i++) {
378 ctl = t4_read_reg(adap, ctl_reg);
380 if (v != X_MBOWNER_NONE)
385 * If we were unable to gain access, report the error to our caller.
387 if (v != X_MBOWNER_PL) {
388 t4_report_fw_error(adap);
389 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
394 * If we gain ownership of the mailbox and there's a "valid" message
395 * in it, this is likely an asynchronous error message from the
396 * firmware. So we'll report that and then proceed on with attempting
397 * to issue our own command ... which may well fail if the error
398 * presaged the firmware crashing ...
400 if (ctl & F_MBMSGVALID) {
401 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
405 * Copy in the new mailbox command and send it on its way ...
407 memset(cmd_rpl, 0, sizeof(cmd_rpl));
408 memcpy(cmd_rpl, cmd, size);
409 CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
410 for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
411 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));
413 if (adap->flags & IS_VF) {
415 * For the VFs, the Mailbox Data "registers" are
416 * actually backed by T4's "MA" interface rather than
417 * PL Registers (as is the case for the PFs). Because
418 * these are in different coherency domains, the write
419 * to the VF's PL-register-backed Mailbox Control can
420 * race in front of the writes to the MA-backed VF
421 * Mailbox Data "registers". So we need to do a
422 * read-back on at least one byte of the VF Mailbox
423 * Data registers before doing the write to the VF
424 * Mailbox Control register.
426 t4_read_reg(adap, data_reg);
429 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
430 read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */
431 next_tx_check = 1000;
436 * Loop waiting for the reply; bail out if we time out or the firmware
440 for (i = 0; i < timeout; i += ms) {
441 if (!(adap->flags & IS_VF)) {
442 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
443 if (pcie_fw & F_PCIE_FW_ERR)
447 if (i >= next_tx_check) {
448 check_tx_state(adap, &tx_state[0]);
449 next_tx_check = i + 1000;
453 ms = delay[delay_idx]; /* last element may repeat */
454 if (delay_idx < ARRAY_SIZE(delay) - 1)
461 v = t4_read_reg(adap, ctl_reg);
462 if (v == X_CIM_PF_NOACCESS)
464 if (G_MBOWNER(v) == X_MBOWNER_PL) {
465 if (!(v & F_MBMSGVALID)) {
466 t4_write_reg(adap, ctl_reg,
467 V_MBOWNER(X_MBOWNER_NONE));
472 * Retrieve the command reply and release the mailbox.
474 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
475 CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
476 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
478 res = be64_to_cpu(cmd_rpl[0]);
479 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
480 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
481 res = V_FW_CMD_RETVAL(EIO);
483 memcpy(rpl, cmd_rpl, size);
484 return -G_FW_CMD_RETVAL((int)res);
489 * We timed out waiting for a reply to our mailbox command. Report
490 * the error and also check to see if the firmware reported any
493 CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
494 *(const u8 *)cmd, mbox, pcie_fw);
495 CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
496 CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
498 if (pcie_fw & F_PCIE_FW_ERR) {
500 t4_report_fw_error(adap);
503 t4_os_dump_devlog(adap);
506 t4_fatal_err(adap, true);
510 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
511 void *rpl, bool sleep_ok)
513 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
514 sleep_ok, FW_CMD_MAX_TIMEOUT);
518 static int t4_edc_err_read(struct adapter *adap, int idx)
520 u32 edc_ecc_err_addr_reg;
521 u32 edc_bist_status_rdata_reg;
524 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
527 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
528 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
532 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
533 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
536 "edc%d err addr 0x%x: 0x%x.\n",
537 idx, edc_ecc_err_addr_reg,
538 t4_read_reg(adap, edc_ecc_err_addr_reg));
540 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
541 edc_bist_status_rdata_reg,
542 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
543 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
544 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
545 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
546 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
547 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
548 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
549 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
550 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
556 * t4_mc_read - read from MC through backdoor accesses
558 * @idx: which MC to access
559 * @addr: address of first byte requested
560 * @data: 64 bytes of data containing the requested address
561 * @ecc: where to store the corresponding 64-bit ECC word
563 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
564 * that covers the requested address @addr. If @parity is not %NULL it
565 * is assigned the 64-bit ECC word for the read data.
567 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
570 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
571 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
574 mc_bist_cmd_reg = A_MC_BIST_CMD;
575 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
576 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
577 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
578 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
580 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
581 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
582 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
583 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
585 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
589 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
591 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
592 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
593 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
594 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
595 F_START_BIST | V_BIST_CMD_GAP(1));
596 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
600 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
602 for (i = 15; i >= 0; i--)
603 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
605 *ecc = t4_read_reg64(adap, MC_DATA(16));
611 * t4_edc_read - read from EDC through backdoor accesses
613 * @idx: which EDC to access
614 * @addr: address of first byte requested
615 * @data: 64 bytes of data containing the requested address
616 * @ecc: where to store the corresponding 64-bit ECC word
618 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
619 * that covers the requested address @addr. If @parity is not %NULL it
620 * is assigned the 64-bit ECC word for the read data.
622 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
625 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
626 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
629 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
630 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
631 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
632 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
634 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
638 * These macro are missing in t4_regs.h file.
639 * Added temporarily for testing.
641 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
642 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
643 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
644 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
645 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
646 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
648 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
654 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
656 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
657 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
658 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
659 t4_write_reg(adap, edc_bist_cmd_reg,
660 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
661 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
665 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
667 for (i = 15; i >= 0; i--)
668 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
670 *ecc = t4_read_reg64(adap, EDC_DATA(16));
676 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
678 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
679 * @addr: address within indicated memory type
680 * @len: amount of memory to read
681 * @buf: host memory buffer
683 * Reads an [almost] arbitrary memory region in the firmware: the
684 * firmware memory address, length and host buffer must be aligned on
685 * 32-bit boudaries. The memory is returned as a raw byte sequence from
686 * the firmware's memory. If this memory contains data structures which
687 * contain multi-byte integers, it's the callers responsibility to
688 * perform appropriate byte order conversions.
690 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
693 u32 pos, start, end, offset;
697 * Argument sanity checks ...
699 if ((addr & 0x3) || (len & 0x3))
703 * The underlaying EDC/MC read routines read 64 bytes at a time so we
704 * need to round down the start and round up the end. We'll start
705 * copying out of the first line at (addr - start) a word at a time.
707 start = rounddown2(addr, 64);
708 end = roundup2(addr + len, 64);
709 offset = (addr - start)/sizeof(__be32);
711 for (pos = start; pos < end; pos += 64, offset = 0) {
715 * Read the chip's memory block and bail if there's an error.
717 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
718 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
720 ret = t4_edc_read(adap, mtype, pos, data, NULL);
725 * Copy the data into the caller's memory buffer.
727 while (offset < 16 && len > 0) {
728 *buf++ = data[offset++];
729 len -= sizeof(__be32);
737 * Return the specified PCI-E Configuration Space register from our Physical
738 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
739 * since we prefer to let the firmware own all of these registers, but if that
740 * fails we go for it directly ourselves.
742 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
746 * If fw_attach != 0, construct and send the Firmware LDST Command to
747 * retrieve the specified PCI-E Configuration Space register.
749 if (drv_fw_attach != 0) {
750 struct fw_ldst_cmd ldst_cmd;
753 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
754 ldst_cmd.op_to_addrspace =
755 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
758 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
759 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
760 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
761 ldst_cmd.u.pcie.ctrl_to_fn =
762 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
763 ldst_cmd.u.pcie.r = reg;
766 * If the LDST Command succeeds, return the result, otherwise
767 * fall through to reading it directly ourselves ...
769 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
772 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
774 CH_WARN(adap, "Firmware failed to return "
775 "Configuration Space register %d, err = %d\n",
780 * Read the desired Configuration Space register via the PCI-E
781 * Backdoor mechanism.
783 return t4_hw_pci_read_cfg4(adap, reg);
787 * t4_get_regs_len - return the size of the chips register set
788 * @adapter: the adapter
790 * Returns the size of the chip's BAR0 register space.
792 unsigned int t4_get_regs_len(struct adapter *adapter)
794 unsigned int chip_version = chip_id(adapter);
796 switch (chip_version) {
798 if (adapter->flags & IS_VF)
799 return FW_T4VF_REGMAP_SIZE;
800 return T4_REGMAP_SIZE;
804 if (adapter->flags & IS_VF)
805 return FW_T4VF_REGMAP_SIZE;
806 return T5_REGMAP_SIZE;
810 "Unsupported chip version %d\n", chip_version);
815 * t4_get_regs - read chip registers into provided buffer
817 * @buf: register buffer
818 * @buf_size: size (in bytes) of register buffer
820 * If the provided register buffer isn't large enough for the chip's
821 * full register range, the register dump will be truncated to the
822 * register buffer's size.
824 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
826 static const unsigned int t4_reg_ranges[] = {
1285 static const unsigned int t4vf_reg_ranges[] = {
1286 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1287 VF_MPS_REG(A_MPS_VF_CTL),
1288 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1289 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1290 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1291 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1292 FW_T4VF_MBDATA_BASE_ADDR,
1293 FW_T4VF_MBDATA_BASE_ADDR +
1294 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1297 static const unsigned int t5_reg_ranges[] = {
2061 static const unsigned int t5vf_reg_ranges[] = {
2062 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2063 VF_MPS_REG(A_MPS_VF_CTL),
2064 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2065 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2066 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2067 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2068 FW_T4VF_MBDATA_BASE_ADDR,
2069 FW_T4VF_MBDATA_BASE_ADDR +
2070 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2073 static const unsigned int t6_reg_ranges[] = {
2630 static const unsigned int t6vf_reg_ranges[] = {
2631 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2632 VF_MPS_REG(A_MPS_VF_CTL),
2633 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2634 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2635 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2636 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2637 FW_T6VF_MBDATA_BASE_ADDR,
2638 FW_T6VF_MBDATA_BASE_ADDR +
2639 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2642 u32 *buf_end = (u32 *)(buf + buf_size);
2643 const unsigned int *reg_ranges;
2644 int reg_ranges_size, range;
2645 unsigned int chip_version = chip_id(adap);
2648 * Select the right set of register ranges to dump depending on the
2649 * adapter chip type.
2651 switch (chip_version) {
2653 if (adap->flags & IS_VF) {
2654 reg_ranges = t4vf_reg_ranges;
2655 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2657 reg_ranges = t4_reg_ranges;
2658 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2663 if (adap->flags & IS_VF) {
2664 reg_ranges = t5vf_reg_ranges;
2665 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2667 reg_ranges = t5_reg_ranges;
2668 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2673 if (adap->flags & IS_VF) {
2674 reg_ranges = t6vf_reg_ranges;
2675 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2677 reg_ranges = t6_reg_ranges;
2678 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2684 "Unsupported chip version %d\n", chip_version);
2689 * Clear the register buffer and insert the appropriate register
2690 * values selected by the above register ranges.
2692 memset(buf, 0, buf_size);
2693 for (range = 0; range < reg_ranges_size; range += 2) {
2694 unsigned int reg = reg_ranges[range];
2695 unsigned int last_reg = reg_ranges[range + 1];
2696 u32 *bufp = (u32 *)(buf + reg);
2699 * Iterate across the register range filling in the register
2700 * buffer but don't write past the end of the register buffer.
2702 while (reg <= last_reg && bufp < buf_end) {
2703 *bufp++ = t4_read_reg(adap, reg);
2710 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID
2711 * header followed by one or more VPD-R sections, each with its own header.
2719 struct t4_vpdr_hdr {
2725 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2727 #define EEPROM_DELAY 10 /* 10us per poll spin */
2728 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2730 #define EEPROM_STAT_ADDR 0x7bfc
2731 #define VPD_SIZE 0x800
2732 #define VPD_BASE 0x400
2733 #define VPD_BASE_OLD 0
2734 #define VPD_LEN 1024
2735 #define VPD_INFO_FLD_HDR_SIZE 3
2736 #define CHELSIO_VPD_UNIQUE_ID 0x82
2739 * Small utility function to wait till any outstanding VPD Access is complete.
2740 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2741 * VPD Access in flight. This allows us to handle the problem of having a
2742 * previous VPD Access time out and prevent an attempt to inject a new VPD
2743 * Request before any in-flight VPD reguest has completed.
2745 static int t4_seeprom_wait(struct adapter *adapter)
2747 unsigned int base = adapter->params.pci.vpd_cap_addr;
2751 * If no VPD Access is in flight, we can just return success right
2754 if (!adapter->vpd_busy)
2758 * Poll the VPD Capability Address/Flag register waiting for it
2759 * to indicate that the operation is complete.
2761 max_poll = EEPROM_MAX_POLL;
2765 udelay(EEPROM_DELAY);
2766 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2769 * If the operation is complete, mark the VPD as no longer
2770 * busy and return success.
2772 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2773 adapter->vpd_busy = 0;
2776 } while (--max_poll);
2779 * Failure! Note that we leave the VPD Busy status set in order to
2780 * avoid pushing a new VPD Access request into the VPD Capability till
2781 * the current operation eventually succeeds. It's a bug to issue a
2782 * new request when an existing request is in flight and will result
2783 * in corrupt hardware state.
2789 * t4_seeprom_read - read a serial EEPROM location
2790 * @adapter: adapter to read
2791 * @addr: EEPROM virtual address
2792 * @data: where to store the read data
2794 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2795 * VPD capability. Note that this function must be called with a virtual
2798 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2800 unsigned int base = adapter->params.pci.vpd_cap_addr;
2804 * VPD Accesses must alway be 4-byte aligned!
2806 if (addr >= EEPROMVSIZE || (addr & 3))
2810 * Wait for any previous operation which may still be in flight to
2813 ret = t4_seeprom_wait(adapter);
2815 CH_ERR(adapter, "VPD still busy from previous operation\n");
2820 * Issue our new VPD Read request, mark the VPD as being busy and wait
2821 * for our request to complete. If it doesn't complete, note the
2822 * error and return it to our caller. Note that we do not reset the
2825 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2826 adapter->vpd_busy = 1;
2827 adapter->vpd_flag = PCI_VPD_ADDR_F;
2828 ret = t4_seeprom_wait(adapter);
2830 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2835 * Grab the returned data, swizzle it into our endianness and
2838 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2839 *data = le32_to_cpu(*data);
2844 * t4_seeprom_write - write a serial EEPROM location
2845 * @adapter: adapter to write
2846 * @addr: virtual EEPROM address
2847 * @data: value to write
2849 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2850 * VPD capability. Note that this function must be called with a virtual
2853 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2855 unsigned int base = adapter->params.pci.vpd_cap_addr;
2861 * VPD Accesses must alway be 4-byte aligned!
2863 if (addr >= EEPROMVSIZE || (addr & 3))
2867 * Wait for any previous operation which may still be in flight to
2870 ret = t4_seeprom_wait(adapter);
2872 CH_ERR(adapter, "VPD still busy from previous operation\n");
2877 * Issue our new VPD Read request, mark the VPD as being busy and wait
2878 * for our request to complete. If it doesn't complete, note the
2879 * error and return it to our caller. Note that we do not reset the
2882 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2884 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2885 (u16)addr | PCI_VPD_ADDR_F);
2886 adapter->vpd_busy = 1;
2887 adapter->vpd_flag = 0;
2888 ret = t4_seeprom_wait(adapter);
2890 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2895 * Reset PCI_VPD_DATA register after a transaction and wait for our
2896 * request to complete. If it doesn't complete, return error.
2898 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2899 max_poll = EEPROM_MAX_POLL;
2901 udelay(EEPROM_DELAY);
2902 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2903 } while ((stats_reg & 0x1) && --max_poll);
2907 /* Return success! */
2912 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2913 * @phys_addr: the physical EEPROM address
2914 * @fn: the PCI function number
2915 * @sz: size of function-specific area
2917 * Translate a physical EEPROM address to virtual. The first 1K is
2918 * accessed through virtual addresses starting at 31K, the rest is
2919 * accessed through virtual addresses starting at 0.
2921 * The mapping is as follows:
2922 * [0..1K) -> [31K..32K)
2923 * [1K..1K+A) -> [ES-A..ES)
2924 * [1K+A..ES) -> [0..ES-A-1K)
2926 * where A = @fn * @sz, and ES = EEPROM size.
2928 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2931 if (phys_addr < 1024)
2932 return phys_addr + (31 << 10);
2933 if (phys_addr < 1024 + fn)
2934 return EEPROMSIZE - fn + phys_addr - 1024;
2935 if (phys_addr < EEPROMSIZE)
2936 return phys_addr - 1024 - fn;
2941 * t4_seeprom_wp - enable/disable EEPROM write protection
2942 * @adapter: the adapter
2943 * @enable: whether to enable or disable write protection
2945 * Enables or disables write protection on the serial EEPROM.
2947 int t4_seeprom_wp(struct adapter *adapter, int enable)
2949 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2953 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2954 * @vpd: Pointer to buffered vpd data structure
2955 * @kw: The keyword to search for
2956 * @region: VPD region to search (starting from 0)
2958 * Returns the value of the information field keyword or
2959 * -ENOENT otherwise.
2961 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
2964 unsigned int offset, len;
2965 const struct t4_vpdr_hdr *vpdr;
2967 offset = sizeof(struct t4_vpd_hdr);
2968 vpdr = (const void *)(vpd + offset);
2969 tag = vpdr->vpdr_tag;
2970 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2972 offset += sizeof(struct t4_vpdr_hdr) + len;
2973 vpdr = (const void *)(vpd + offset);
2974 if (++tag != vpdr->vpdr_tag)
2976 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2978 offset += sizeof(struct t4_vpdr_hdr);
2980 if (offset + len > VPD_LEN) {
2984 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2985 if (memcmp(vpd + i , kw , 2) == 0){
2986 i += VPD_INFO_FLD_HDR_SIZE;
2990 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
2998 * get_vpd_params - read VPD parameters from VPD EEPROM
2999 * @adapter: adapter to read
3000 * @p: where to store the parameters
3001 * @vpd: caller provided temporary space to read the VPD into
3003 * Reads card parameters stored in VPD EEPROM.
3005 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
3006 uint16_t device_id, u32 *buf)
3009 int ec, sn, pn, na, md;
3011 const u8 *vpd = (const u8 *)buf;
3014 * Card information normally starts at VPD_BASE but early cards had
3017 ret = t4_seeprom_read(adapter, VPD_BASE, buf);
3022 * The VPD shall have a unique identifier specified by the PCI SIG.
3023 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
3024 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
3025 * is expected to automatically put this entry at the
3026 * beginning of the VPD.
3028 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
3030 for (i = 0; i < VPD_LEN; i += 4) {
3031 ret = t4_seeprom_read(adapter, addr + i, buf++);
3036 #define FIND_VPD_KW(var,name) do { \
3037 var = get_vpd_keyword_val(vpd, name, 0); \
3039 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
3044 FIND_VPD_KW(i, "RV");
3045 for (csum = 0; i >= 0; i--)
3050 "corrupted VPD EEPROM, actual csum %u\n", csum);
3054 FIND_VPD_KW(ec, "EC");
3055 FIND_VPD_KW(sn, "SN");
3056 FIND_VPD_KW(pn, "PN");
3057 FIND_VPD_KW(na, "NA");
3060 memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
3062 memcpy(p->ec, vpd + ec, EC_LEN);
3064 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3065 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3067 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3068 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3069 strstrip((char *)p->pn);
3070 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3071 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3072 strstrip((char *)p->na);
3074 if (device_id & 0x80)
3075 return 0; /* Custom card */
3077 md = get_vpd_keyword_val(vpd, "VF", 1);
3079 snprintf(p->md, sizeof(p->md), "unknown");
3081 i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
3082 memcpy(p->md, vpd + md, min(i, MD_LEN));
3083 strstrip((char *)p->md);
3089 /* serial flash and firmware constants and flash config file constants */
3091 SF_ATTEMPTS = 10, /* max retries for SF operations */
3093 /* flash command opcodes */
3094 SF_PROG_PAGE = 2, /* program 256B page */
3095 SF_WR_DISABLE = 4, /* disable writes */
3096 SF_RD_STATUS = 5, /* read status register */
3097 SF_WR_ENABLE = 6, /* enable writes */
3098 SF_RD_DATA_FAST = 0xb, /* read flash */
3099 SF_RD_ID = 0x9f, /* read ID */
3100 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */
3104 * sf1_read - read data from the serial flash
3105 * @adapter: the adapter
3106 * @byte_cnt: number of bytes to read
3107 * @cont: whether another operation will be chained
3108 * @lock: whether to lock SF for PL access only
3109 * @valp: where to store the read data
3111 * Reads up to 4 bytes of data from the serial flash. The location of
3112 * the read needs to be specified prior to calling this by issuing the
3113 * appropriate commands to the serial flash.
3115 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3116 int lock, u32 *valp)
3120 if (!byte_cnt || byte_cnt > 4)
3122 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3124 t4_write_reg(adapter, A_SF_OP,
3125 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3126 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3128 *valp = t4_read_reg(adapter, A_SF_DATA);
3133 * sf1_write - write data to the serial flash
3134 * @adapter: the adapter
3135 * @byte_cnt: number of bytes to write
3136 * @cont: whether another operation will be chained
3137 * @lock: whether to lock SF for PL access only
3138 * @val: value to write
3140 * Writes up to 4 bytes of data to the serial flash. The location of
3141 * the write needs to be specified prior to calling this by issuing the
3142 * appropriate commands to the serial flash.
3144 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3147 if (!byte_cnt || byte_cnt > 4)
3149 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3151 t4_write_reg(adapter, A_SF_DATA, val);
3152 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3153 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3154 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3158 * flash_wait_op - wait for a flash operation to complete
3159 * @adapter: the adapter
3160 * @attempts: max number of polls of the status register
3161 * @delay: delay between polls in ms
3163 * Wait for a flash operation to complete by polling the status register.
3165 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3171 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3172 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3176 if (--attempts == 0)
3184 * t4_read_flash - read words from serial flash
3185 * @adapter: the adapter
3186 * @addr: the start address for the read
3187 * @nwords: how many 32-bit words to read
3188 * @data: where to store the read data
3189 * @byte_oriented: whether to store data as bytes or as words
3191 * Read the specified number of 32-bit words from the serial flash.
3192 * If @byte_oriented is set the read data is stored as a byte array
3193 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3194 * natural endianness.
3196 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3197 unsigned int nwords, u32 *data, int byte_oriented)
3201 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3204 addr = swab32(addr) | SF_RD_DATA_FAST;
3206 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3207 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3210 for ( ; nwords; nwords--, data++) {
3211 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3213 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3217 *data = (__force __u32)(cpu_to_be32(*data));
3223 * t4_write_flash - write up to a page of data to the serial flash
3224 * @adapter: the adapter
3225 * @addr: the start address to write
3226 * @n: length of data to write in bytes
3227 * @data: the data to write
3228 * @byte_oriented: whether to store data as bytes or as words
3230 * Writes up to a page of data (256 bytes) to the serial flash starting
3231 * at the given address. All the data must be written to the same page.
3232 * If @byte_oriented is set the write data is stored as byte stream
3233 * (i.e. matches what on disk), otherwise in big-endian.
3235 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3236 unsigned int n, const u8 *data, int byte_oriented)
3239 u32 buf[SF_PAGE_SIZE / 4];
3240 unsigned int i, c, left, val, offset = addr & 0xff;
3242 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3245 val = swab32(addr) | SF_PROG_PAGE;
3247 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3248 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3251 for (left = n; left; left -= c) {
3253 for (val = 0, i = 0; i < c; ++i)
3254 val = (val << 8) + *data++;
3257 val = cpu_to_be32(val);
3259 ret = sf1_write(adapter, c, c != left, 1, val);
3263 ret = flash_wait_op(adapter, 8, 1);
3267 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3269 /* Read the page to verify the write succeeded */
3270 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3275 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3277 "failed to correctly write the flash page at %#x\n",
3284 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3289 * t4_get_fw_version - read the firmware version
3290 * @adapter: the adapter
3291 * @vers: where to place the version
3293 * Reads the FW version from flash.
3295 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3297 return t4_read_flash(adapter, FLASH_FW_START +
3298 offsetof(struct fw_hdr, fw_ver), 1,
3303 * t4_get_fw_hdr - read the firmware header
3304 * @adapter: the adapter
3305 * @hdr: where to place the version
3307 * Reads the FW header from flash into caller provided buffer.
3309 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
3311 return t4_read_flash(adapter, FLASH_FW_START,
3312 sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
3316 * t4_get_bs_version - read the firmware bootstrap version
3317 * @adapter: the adapter
3318 * @vers: where to place the version
3320 * Reads the FW Bootstrap version from flash.
3322 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3324 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3325 offsetof(struct fw_hdr, fw_ver), 1,
3330 * t4_get_tp_version - read the TP microcode version
3331 * @adapter: the adapter
3332 * @vers: where to place the version
3334 * Reads the TP microcode version from flash.
3336 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3338 return t4_read_flash(adapter, FLASH_FW_START +
3339 offsetof(struct fw_hdr, tp_microcode_ver),
3344 * t4_get_exprom_version - return the Expansion ROM version (if any)
3345 * @adapter: the adapter
3346 * @vers: where to place the version
3348 * Reads the Expansion ROM header from FLASH and returns the version
3349 * number (if present) through the @vers return value pointer. We return
3350 * this in the Firmware Version Format since it's convenient. Return
3351 * 0 on success, -ENOENT if no Expansion ROM is present.
3353 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
3355 struct exprom_header {
3356 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3357 unsigned char hdr_ver[4]; /* Expansion ROM version */
3359 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3363 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
3364 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3369 hdr = (struct exprom_header *)exprom_header_buf;
3370 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3373 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3374 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3375 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3376 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3381 * t4_get_scfg_version - return the Serial Configuration version
3382 * @adapter: the adapter
3383 * @vers: where to place the version
3385 * Reads the Serial Configuration Version via the Firmware interface
3386 * (thus this can only be called once we're ready to issue Firmware
3387 * commands). The format of the Serial Configuration version is
3388 * adapter specific. Returns 0 on success, an error on failure.
3390 * Note that early versions of the Firmware didn't include the ability
3391 * to retrieve the Serial Configuration version, so we zero-out the
3392 * return-value parameter in that case to avoid leaving it with
3395 * Also note that the Firmware will return its cached copy of the Serial
3396 * Initialization Revision ID, not the actual Revision ID as written in
3397 * the Serial EEPROM. This is only an issue if a new VPD has been written
3398 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3399 * it's best to defer calling this routine till after a FW_RESET_CMD has
3400 * been issued if the Host Driver will be performing a full adapter
3403 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3408 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3409 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3410 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3411 1, &scfgrev_param, vers);
3418 * t4_get_vpd_version - return the VPD version
3419 * @adapter: the adapter
3420 * @vers: where to place the version
3422 * Reads the VPD via the Firmware interface (thus this can only be called
3423 * once we're ready to issue Firmware commands). The format of the
3424 * VPD version is adapter specific. Returns 0 on success, an error on
3427 * Note that early versions of the Firmware didn't include the ability
3428 * to retrieve the VPD version, so we zero-out the return-value parameter
3429 * in that case to avoid leaving it with garbage in it.
3431 * Also note that the Firmware will return its cached copy of the VPD
3432 * Revision ID, not the actual Revision ID as written in the Serial
3433 * EEPROM. This is only an issue if a new VPD has been written and the
3434 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3435 * to defer calling this routine till after a FW_RESET_CMD has been issued
3436 * if the Host Driver will be performing a full adapter initialization.
3438 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3443 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3444 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3445 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3446 1, &vpdrev_param, vers);
3453 * t4_get_version_info - extract various chip/firmware version information
3454 * @adapter: the adapter
3456 * Reads various chip/firmware version numbers and stores them into the
3457 * adapter Adapter Parameters structure. If any of the efforts fails
3458 * the first failure will be returned, but all of the version numbers
3461 int t4_get_version_info(struct adapter *adapter)
3465 #define FIRST_RET(__getvinfo) \
3467 int __ret = __getvinfo; \
3468 if (__ret && !ret) \
3472 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3473 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3474 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3475 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3476 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3477 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3485 * t4_flash_erase_sectors - erase a range of flash sectors
3486 * @adapter: the adapter
3487 * @start: the first sector to erase
3488 * @end: the last sector to erase
3490 * Erases the sectors in the given inclusive range.
3492 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3496 if (end >= adapter->params.sf_nsec)
3499 while (start <= end) {
3500 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3501 (ret = sf1_write(adapter, 4, 0, 1,
3502 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3503 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3505 "erase of flash sector %d failed, error %d\n",
3511 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3516 * t4_flash_cfg_addr - return the address of the flash configuration file
3517 * @adapter: the adapter
3519 * Return the address within the flash where the Firmware Configuration
3520 * File is stored, or an error if the device FLASH is too small to contain
3521 * a Firmware Configuration File.
3523 int t4_flash_cfg_addr(struct adapter *adapter)
3526 * If the device FLASH isn't large enough to hold a Firmware
3527 * Configuration File, return an error.
3529 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3532 return FLASH_CFG_START;
3536 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3537 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3538 * and emit an error message for mismatched firmware to save our caller the
3541 static int t4_fw_matches_chip(struct adapter *adap,
3542 const struct fw_hdr *hdr)
3545 * The expression below will return FALSE for any unsupported adapter
3546 * which will keep us "honest" in the future ...
3548 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3549 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3550 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3554 "FW image (%d) is not suitable for this adapter (%d)\n",
3555 hdr->chip, chip_id(adap));
3560 * t4_load_fw - download firmware
3561 * @adap: the adapter
3562 * @fw_data: the firmware image to write
3565 * Write the supplied firmware image to the card's serial flash.
3567 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3572 u8 first_page[SF_PAGE_SIZE];
3573 const u32 *p = (const u32 *)fw_data;
3574 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3575 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3576 unsigned int fw_start_sec;
3577 unsigned int fw_start;
3578 unsigned int fw_size;
3580 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3581 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3582 fw_start = FLASH_FWBOOTSTRAP_START;
3583 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3585 fw_start_sec = FLASH_FW_START_SEC;
3586 fw_start = FLASH_FW_START;
3587 fw_size = FLASH_FW_MAX_SIZE;
3591 CH_ERR(adap, "FW image has no data\n");
3596 "FW image size not multiple of 512 bytes\n");
3599 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3601 "FW image size differs from size in FW header\n");
3604 if (size > fw_size) {
3605 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3609 if (!t4_fw_matches_chip(adap, hdr))
3612 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3613 csum += be32_to_cpu(p[i]);
3615 if (csum != 0xffffffff) {
3617 "corrupted firmware image, checksum %#x\n", csum);
3621 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3622 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3627 * We write the correct version at the end so the driver can see a bad
3628 * version if the FW write fails. Start by writing a copy of the
3629 * first page with a bad version.
3631 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3632 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3633 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3638 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3639 addr += SF_PAGE_SIZE;
3640 fw_data += SF_PAGE_SIZE;
3641 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3646 ret = t4_write_flash(adap,
3647 fw_start + offsetof(struct fw_hdr, fw_ver),
3648 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3651 CH_ERR(adap, "firmware download failed, error %d\n",
3657 * t4_fwcache - firmware cache operation
3658 * @adap: the adapter
3659 * @op : the operation (flush or flush and invalidate)
3661 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3663 struct fw_params_cmd c;
3665 memset(&c, 0, sizeof(c));
3667 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3668 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3669 V_FW_PARAMS_CMD_PFN(adap->pf) |
3670 V_FW_PARAMS_CMD_VFN(0));
3671 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3673 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3674 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3675 c.param[0].val = (__force __be32)op;
3677 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3680 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3681 unsigned int *pif_req_wrptr,
3682 unsigned int *pif_rsp_wrptr)
3685 u32 cfg, val, req, rsp;
3687 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3688 if (cfg & F_LADBGEN)
3689 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3691 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3692 req = G_POLADBGWRPTR(val);
3693 rsp = G_PILADBGWRPTR(val);
3695 *pif_req_wrptr = req;
3697 *pif_rsp_wrptr = rsp;
3699 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3700 for (j = 0; j < 6; j++) {
3701 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3702 V_PILADBGRDPTR(rsp));
3703 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3704 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3708 req = (req + 2) & M_POLADBGRDPTR;
3709 rsp = (rsp + 2) & M_PILADBGRDPTR;
3711 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3714 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3719 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3720 if (cfg & F_LADBGEN)
3721 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3723 for (i = 0; i < CIM_MALA_SIZE; i++) {
3724 for (j = 0; j < 5; j++) {
3726 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3727 V_PILADBGRDPTR(idx));
3728 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3729 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3732 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3735 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3739 for (i = 0; i < 8; i++) {
3740 u32 *p = la_buf + i;
3742 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3743 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3744 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3745 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3746 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3751 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3752 * @caps16: a 16-bit Port Capabilities value
3754 * Returns the equivalent 32-bit Port Capabilities value.
3756 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
3758 uint32_t caps32 = 0;
3760 #define CAP16_TO_CAP32(__cap) \
3762 if (caps16 & FW_PORT_CAP_##__cap) \
3763 caps32 |= FW_PORT_CAP32_##__cap; \
3766 CAP16_TO_CAP32(SPEED_100M);
3767 CAP16_TO_CAP32(SPEED_1G);
3768 CAP16_TO_CAP32(SPEED_25G);
3769 CAP16_TO_CAP32(SPEED_10G);
3770 CAP16_TO_CAP32(SPEED_40G);
3771 CAP16_TO_CAP32(SPEED_100G);
3772 CAP16_TO_CAP32(FC_RX);
3773 CAP16_TO_CAP32(FC_TX);
3774 CAP16_TO_CAP32(ANEG);
3775 CAP16_TO_CAP32(FORCE_PAUSE);
3776 CAP16_TO_CAP32(MDIAUTO);
3777 CAP16_TO_CAP32(MDISTRAIGHT);
3778 CAP16_TO_CAP32(FEC_RS);
3779 CAP16_TO_CAP32(FEC_BASER_RS);
3780 CAP16_TO_CAP32(802_3_PAUSE);
3781 CAP16_TO_CAP32(802_3_ASM_DIR);
3783 #undef CAP16_TO_CAP32
3789 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3790 * @caps32: a 32-bit Port Capabilities value
3792 * Returns the equivalent 16-bit Port Capabilities value. Note that
3793 * not all 32-bit Port Capabilities can be represented in the 16-bit
3794 * Port Capabilities and some fields/values may not make it.
3796 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
3798 uint16_t caps16 = 0;
3800 #define CAP32_TO_CAP16(__cap) \
3802 if (caps32 & FW_PORT_CAP32_##__cap) \
3803 caps16 |= FW_PORT_CAP_##__cap; \
3806 CAP32_TO_CAP16(SPEED_100M);
3807 CAP32_TO_CAP16(SPEED_1G);
3808 CAP32_TO_CAP16(SPEED_10G);
3809 CAP32_TO_CAP16(SPEED_25G);
3810 CAP32_TO_CAP16(SPEED_40G);
3811 CAP32_TO_CAP16(SPEED_100G);
3812 CAP32_TO_CAP16(FC_RX);
3813 CAP32_TO_CAP16(FC_TX);
3814 CAP32_TO_CAP16(802_3_PAUSE);
3815 CAP32_TO_CAP16(802_3_ASM_DIR);
3816 CAP32_TO_CAP16(ANEG);
3817 CAP32_TO_CAP16(FORCE_PAUSE);
3818 CAP32_TO_CAP16(MDIAUTO);
3819 CAP32_TO_CAP16(MDISTRAIGHT);
3820 CAP32_TO_CAP16(FEC_RS);
3821 CAP32_TO_CAP16(FEC_BASER_RS);
3823 #undef CAP32_TO_CAP16
3829 is_bt(struct port_info *pi)
3832 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
3833 pi->port_type == FW_PORT_TYPE_BT_XFI ||
3834 pi->port_type == FW_PORT_TYPE_BT_XAUI);
3837 static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none)
3841 if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0)
3842 return (unset_means_none ? FEC_NONE : 0);
3844 if (caps & FW_PORT_CAP32_FEC_RS)
3846 if (caps & FW_PORT_CAP32_FEC_BASER_RS)
3847 fec |= FEC_BASER_RS;
3848 if (caps & FW_PORT_CAP32_FEC_NO_FEC)
3855 * Note that 0 is not translated to NO_FEC.
3857 static uint32_t fec_to_fwcap(int8_t fec)
3861 /* Only real FECs allowed. */
3862 MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0);
3865 caps |= FW_PORT_CAP32_FEC_RS;
3866 if (fec & FEC_BASER_RS)
3867 caps |= FW_PORT_CAP32_FEC_BASER_RS;
3869 caps |= FW_PORT_CAP32_FEC_NO_FEC;
3875 * t4_link_l1cfg - apply link configuration to MAC/PHY
3876 * @phy: the PHY to setup
3877 * @mac: the MAC to setup
3878 * @lc: the requested link configuration
3880 * Set up a port's MAC and PHY according to a desired link configuration.
3881 * - If the PHY can auto-negotiate first decide what to advertise, then
3882 * enable/disable auto-negotiation as desired, and reset.
3883 * - If the PHY does not auto-negotiate just reset it.
3884 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3885 * otherwise do it later based on the outcome of auto-negotiation.
3887 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3888 struct link_config *lc)
3890 struct fw_port_cmd c;
3891 unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
3892 unsigned int aneg, fc, fec, speed, rcap;
3895 if (lc->requested_fc & PAUSE_RX)
3896 fc |= FW_PORT_CAP32_FC_RX;
3897 if (lc->requested_fc & PAUSE_TX)
3898 fc |= FW_PORT_CAP32_FC_TX;
3899 if (!(lc->requested_fc & PAUSE_AUTONEG))
3900 fc |= FW_PORT_CAP32_FORCE_PAUSE;
3902 if (lc->requested_aneg == AUTONEG_DISABLE)
3904 else if (lc->requested_aneg == AUTONEG_ENABLE)
3905 aneg = FW_PORT_CAP32_ANEG;
3907 aneg = lc->pcaps & FW_PORT_CAP32_ANEG;
3911 V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
3912 } else if (lc->requested_speed != 0)
3913 speed = speed_to_fwcap(lc->requested_speed);
3915 speed = fwcap_top_speed(lc->pcaps);
3918 if (fec_supported(speed)) {
3919 if (lc->requested_fec == FEC_AUTO) {
3920 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
3921 if (speed & FW_PORT_CAP32_SPEED_100G) {
3922 fec |= FW_PORT_CAP32_FEC_RS;
3923 fec |= FW_PORT_CAP32_FEC_NO_FEC;
3925 fec |= FW_PORT_CAP32_FEC_RS;
3926 fec |= FW_PORT_CAP32_FEC_BASER_RS;
3927 fec |= FW_PORT_CAP32_FEC_NO_FEC;
3930 /* Set only 1b with old firmwares. */
3931 fec |= fec_to_fwcap(lc->fec_hint);
3934 fec |= fec_to_fwcap(lc->requested_fec &
3935 M_FW_PORT_CAP32_FEC);
3936 if (lc->requested_fec & FEC_MODULE)
3937 fec |= fec_to_fwcap(lc->fec_hint);
3940 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
3941 fec |= FW_PORT_CAP32_FORCE_FEC;
3942 else if (fec == FW_PORT_CAP32_FEC_NO_FEC)
3946 /* Force AN on for BT cards. */
3947 if (is_bt(adap->port[adap->chan_map[port]]))
3948 aneg = lc->pcaps & FW_PORT_CAP32_ANEG;
3950 rcap = aneg | speed | fc | fec;
3951 if ((rcap | lc->pcaps) != lc->pcaps) {
3953 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
3954 lc->pcaps, rcap & (rcap ^ lc->pcaps));
3960 memset(&c, 0, sizeof(c));
3961 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3962 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3963 V_FW_PORT_CMD_PORTID(port));
3964 if (adap->params.port_caps32) {
3966 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
3968 c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
3971 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3973 c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
3976 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3980 * t4_restart_aneg - restart autonegotiation
3981 * @adap: the adapter
3982 * @mbox: mbox to use for the FW command
3983 * @port: the port id
3985 * Restarts autonegotiation for the selected port.
3987 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3989 struct fw_port_cmd c;
3991 memset(&c, 0, sizeof(c));
3992 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3993 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3994 V_FW_PORT_CMD_PORTID(port));
3996 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3998 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3999 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4002 struct intr_details {
4007 struct intr_action {
4010 bool (*action)(struct adapter *, int, bool);
4013 #define NONFATAL_IF_DISABLED 1
4015 const char *name; /* name of the INT_CAUSE register */
4016 int cause_reg; /* INT_CAUSE register */
4017 int enable_reg; /* INT_ENABLE register */
4018 u32 fatal; /* bits that are fatal */
4019 int flags; /* hints */
4020 const struct intr_details *details;
4021 const struct intr_action *actions;
4025 intr_alert_char(u32 cause, u32 enable, u32 fatal)
4036 t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
4038 u32 enable, fatal, leftover;
4039 const struct intr_details *details;
4042 enable = t4_read_reg(adap, ii->enable_reg);
4043 if (ii->flags & NONFATAL_IF_DISABLED)
4044 fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
4047 alert = intr_alert_char(cause, enable, fatal);
4048 CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
4049 alert, ii->name, ii->cause_reg, cause, enable, fatal);
4052 for (details = ii->details; details && details->mask != 0; details++) {
4053 u32 msgbits = details->mask & cause;
4056 alert = intr_alert_char(msgbits, enable, ii->fatal);
4057 CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
4059 leftover &= ~msgbits;
4061 if (leftover != 0 && leftover != cause)
4062 CH_ALERT(adap, " ? [0x%08x]\n", leftover);
4066 * Returns true for fatal error.
4069 t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
4070 u32 additional_cause, bool verbose)
4074 const struct intr_action *action;
4077 * Read and display cause. Note that the top level PL_INT_CAUSE is a
4078 * bit special and we need to completely ignore the bits that are not in
4081 cause = t4_read_reg(adap, ii->cause_reg);
4082 if (ii->cause_reg == A_PL_INT_CAUSE)
4083 cause &= t4_read_reg(adap, ii->enable_reg);
4084 if (verbose || cause != 0)
4085 t4_show_intr_info(adap, ii, cause);
4086 fatal = cause & ii->fatal;
4087 if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
4088 fatal &= t4_read_reg(adap, ii->enable_reg);
4089 cause |= additional_cause;
4094 for (action = ii->actions; action && action->mask != 0; action++) {
4095 if (!(action->mask & cause))
4097 rc |= (action->action)(adap, action->arg, verbose);
4101 t4_write_reg(adap, ii->cause_reg, cause);
4102 (void)t4_read_reg(adap, ii->cause_reg);
4108 * Interrupt handler for the PCIE module.
4110 static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
4112 static const struct intr_details sysbus_intr_details[] = {
4113 { F_RNPP, "RXNP array parity error" },
4114 { F_RPCP, "RXPC array parity error" },
4115 { F_RCIP, "RXCIF array parity error" },
4116 { F_RCCP, "Rx completions control array parity error" },
4117 { F_RFTP, "RXFT array parity error" },
4120 static const struct intr_info sysbus_intr_info = {
4121 .name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
4122 .cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4123 .enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
4124 .fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
4126 .details = sysbus_intr_details,
4129 static const struct intr_details pcie_port_intr_details[] = {
4130 { F_TPCP, "TXPC array parity error" },
4131 { F_TNPP, "TXNP array parity error" },
4132 { F_TFTP, "TXFT array parity error" },
4133 { F_TCAP, "TXCA array parity error" },
4134 { F_TCIP, "TXCIF array parity error" },
4135 { F_RCAP, "RXCA array parity error" },
4136 { F_OTDD, "outbound request TLP discarded" },
4137 { F_RDPE, "Rx data parity error" },
4138 { F_TDUE, "Tx uncorrectable data error" },
4141 static const struct intr_info pcie_port_intr_info = {
4142 .name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
4143 .cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4144 .enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
4145 .fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
4146 F_OTDD | F_RDPE | F_TDUE,
4148 .details = pcie_port_intr_details,
4151 static const struct intr_details pcie_intr_details[] = {
4152 { F_MSIADDRLPERR, "MSI AddrL parity error" },
4153 { F_MSIADDRHPERR, "MSI AddrH parity error" },
4154 { F_MSIDATAPERR, "MSI data parity error" },
4155 { F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
4156 { F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
4157 { F_MSIXDATAPERR, "MSI-X data parity error" },
4158 { F_MSIXDIPERR, "MSI-X DI parity error" },
4159 { F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
4160 { F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
4161 { F_TARTAGPERR, "PCIe target tag FIFO parity error" },
4162 { F_CCNTPERR, "PCIe CMD channel count parity error" },
4163 { F_CREQPERR, "PCIe CMD channel request parity error" },
4164 { F_CRSPPERR, "PCIe CMD channel response parity error" },
4165 { F_DCNTPERR, "PCIe DMA channel count parity error" },
4166 { F_DREQPERR, "PCIe DMA channel request parity error" },
4167 { F_DRSPPERR, "PCIe DMA channel response parity error" },
4168 { F_HCNTPERR, "PCIe HMA channel count parity error" },
4169 { F_HREQPERR, "PCIe HMA channel request parity error" },
4170 { F_HRSPPERR, "PCIe HMA channel response parity error" },
4171 { F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
4172 { F_FIDPERR, "PCIe FID parity error" },
4173 { F_INTXCLRPERR, "PCIe INTx clear parity error" },
4174 { F_MATAGPERR, "PCIe MA tag parity error" },
4175 { F_PIOTAGPERR, "PCIe PIO tag parity error" },
4176 { F_RXCPLPERR, "PCIe Rx completion parity error" },
4177 { F_RXWRPERR, "PCIe Rx write parity error" },
4178 { F_RPLPERR, "PCIe replay buffer parity error" },
4179 { F_PCIESINT, "PCIe core secondary fault" },
4180 { F_PCIEPINT, "PCIe core primary fault" },
4181 { F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
4184 static const struct intr_details t5_pcie_intr_details[] = {
4185 { F_IPGRPPERR, "Parity errors observed by IP" },
4186 { F_NONFATALERR, "PCIe non-fatal error" },
4187 { F_READRSPERR, "Outbound read error" },
4188 { F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
4189 { F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
4190 { F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
4191 { F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
4192 { F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
4193 { F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
4194 { F_MAGRPPERR, "MA group FIFO parity error" },
4195 { F_VFIDPERR, "VFID SRAM parity error" },
4196 { F_FIDPERR, "FID SRAM parity error" },
4197 { F_CFGSNPPERR, "config snoop FIFO parity error" },
4198 { F_HRSPPERR, "HMA channel response data SRAM parity error" },
4199 { F_HREQRDPERR, "HMA channel read request SRAM parity error" },
4200 { F_HREQWRPERR, "HMA channel write request SRAM parity error" },
4201 { F_DRSPPERR, "DMA channel response data SRAM parity error" },
4202 { F_DREQRDPERR, "DMA channel write request SRAM parity error" },
4203 { F_CRSPPERR, "CMD channel response data SRAM parity error" },
4204 { F_CREQRDPERR, "CMD channel read request SRAM parity error" },
4205 { F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
4206 { F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
4207 { F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
4208 { F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
4209 { F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
4210 { F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
4211 { F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
4212 { F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
4213 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
4214 { F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
4215 { F_MSTGRPPERR, "Master response read queue SRAM parity error" },
4218 struct intr_info pcie_intr_info = {
4219 .name = "PCIE_INT_CAUSE",
4220 .cause_reg = A_PCIE_INT_CAUSE,
4221 .enable_reg = A_PCIE_INT_ENABLE,
4222 .fatal = 0xffffffff,
4223 .flags = NONFATAL_IF_DISABLED,
4230 fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
4231 fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
4233 pcie_intr_info.details = pcie_intr_details;
4235 pcie_intr_info.details = t5_pcie_intr_details;
4237 fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
4243 * TP interrupt handler.
4245 static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
4247 static const struct intr_details tp_intr_details[] = {
4248 { 0x3fffffff, "TP parity error" },
4249 { F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
4252 static const struct intr_info tp_intr_info = {
4253 .name = "TP_INT_CAUSE",
4254 .cause_reg = A_TP_INT_CAUSE,
4255 .enable_reg = A_TP_INT_ENABLE,
4256 .fatal = 0x7fffffff,
4257 .flags = NONFATAL_IF_DISABLED,
4258 .details = tp_intr_details,
4262 return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
4266 * SGE interrupt handler.
4268 static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
4270 static const struct intr_info sge_int1_info = {
4271 .name = "SGE_INT_CAUSE1",
4272 .cause_reg = A_SGE_INT_CAUSE1,
4273 .enable_reg = A_SGE_INT_ENABLE1,
4274 .fatal = 0xffffffff,
4275 .flags = NONFATAL_IF_DISABLED,
4279 static const struct intr_info sge_int2_info = {
4280 .name = "SGE_INT_CAUSE2",
4281 .cause_reg = A_SGE_INT_CAUSE2,
4282 .enable_reg = A_SGE_INT_ENABLE2,
4283 .fatal = 0xffffffff,
4284 .flags = NONFATAL_IF_DISABLED,
4288 static const struct intr_details sge_int3_details[] = {
4290 "DBP pointer delivery for invalid context or QID" },
4291 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4292 "Invalid QID or header request by IDMA" },
4293 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4294 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4295 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4296 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4297 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4298 { F_ERR_TIMER_ABOVE_MAX_QID,
4299 "SGE GTS with timer 0-5 for IQID > 1023" },
4300 { F_ERR_CPL_EXCEED_IQE_SIZE,
4301 "SGE received CPL exceeding IQE size" },
4302 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4303 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4304 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4305 { F_ERR_DROPPED_DB, "SGE DB dropped" },
4306 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4307 "SGE IQID > 1023 received CPL for FL" },
4308 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4309 F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4310 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4311 { F_ERR_ING_CTXT_PRIO,
4312 "Ingress context manager priority user error" },
4313 { F_ERR_EGR_CTXT_PRIO,
4314 "Egress context manager priority user error" },
4315 { F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
4316 { F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
4317 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4318 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4319 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4320 { 0x0000000f, "SGE context access for invalid queue" },
4323 static const struct intr_details t6_sge_int3_details[] = {
4325 "DBP pointer delivery for invalid context or QID" },
4326 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4327 "Invalid QID or header request by IDMA" },
4328 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4329 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4330 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4331 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4332 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4333 { F_ERR_TIMER_ABOVE_MAX_QID,
4334 "SGE GTS with timer 0-5 for IQID > 1023" },
4335 { F_ERR_CPL_EXCEED_IQE_SIZE,
4336 "SGE received CPL exceeding IQE size" },
4337 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4338 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4339 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4340 { F_ERR_DROPPED_DB, "SGE DB dropped" },
4341 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4342 "SGE IQID > 1023 received CPL for FL" },
4343 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4344 F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4345 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4346 { F_ERR_ING_CTXT_PRIO,
4347 "Ingress context manager priority user error" },
4348 { F_ERR_EGR_CTXT_PRIO,
4349 "Egress context manager priority user error" },
4350 { F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
4352 "SGE WRE packet less than advertized length" },
4353 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4354 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4355 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4356 { 0x0000000f, "SGE context access for invalid queue" },
4359 struct intr_info sge_int3_info = {
4360 .name = "SGE_INT_CAUSE3",
4361 .cause_reg = A_SGE_INT_CAUSE3,
4362 .enable_reg = A_SGE_INT_ENABLE3,
4363 .fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
4368 static const struct intr_info sge_int4_info = {
4369 .name = "SGE_INT_CAUSE4",
4370 .cause_reg = A_SGE_INT_CAUSE4,
4371 .enable_reg = A_SGE_INT_ENABLE4,
4377 static const struct intr_info sge_int5_info = {
4378 .name = "SGE_INT_CAUSE5",
4379 .cause_reg = A_SGE_INT_CAUSE5,
4380 .enable_reg = A_SGE_INT_ENABLE5,
4381 .fatal = 0xffffffff,
4382 .flags = NONFATAL_IF_DISABLED,
4386 static const struct intr_info sge_int6_info = {
4387 .name = "SGE_INT_CAUSE6",
4388 .cause_reg = A_SGE_INT_CAUSE6,
4389 .enable_reg = A_SGE_INT_ENABLE6,
4399 if (chip_id(adap) <= CHELSIO_T5) {
4400 sge_int3_info.details = sge_int3_details;
4402 sge_int3_info.details = t6_sge_int3_details;
4406 fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
4407 fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
4408 fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
4409 fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
4410 if (chip_id(adap) >= CHELSIO_T5)
4411 fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
4412 if (chip_id(adap) >= CHELSIO_T6)
4413 fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
4415 v = t4_read_reg(adap, A_SGE_ERROR_STATS);
4416 if (v & F_ERROR_QID_VALID) {
4417 CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
4418 if (v & F_UNCAPTURED_ERROR)
4419 CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
4420 t4_write_reg(adap, A_SGE_ERROR_STATS,
4421 F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
4428 * CIM interrupt handler.
4430 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
4432 static const struct intr_action cim_host_intr_actions[] = {
4433 { F_TIMER0INT, 0, t4_os_dump_cimla },
4436 static const struct intr_details cim_host_intr_details[] = {
4438 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
4441 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
4442 { F_PLCIM_MSTRSPDATAPARERR,
4443 "PL2CIM master response data parity error" },
4444 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
4445 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
4446 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
4447 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
4448 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
4449 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
4452 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
4453 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
4454 { F_MBHOSTPARERR, "CIM mailbox host read parity error" },
4455 { F_MBUPPARERR, "CIM mailbox uP parity error" },
4456 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
4457 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
4458 { F_IBQULPPARERR, "CIM IBQ ULP parity error" },
4459 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
4460 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */
4461 "CIM IBQ PCIe/SGE_HI parity error" },
4462 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
4463 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
4464 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
4465 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
4466 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
4467 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
4468 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
4469 { F_TIMER1INT, "CIM TIMER0 interrupt" },
4470 { F_TIMER0INT, "CIM TIMER0 interrupt" },
4471 { F_PREFDROPINT, "CIM control register prefetch drop" },
4474 static const struct intr_info cim_host_intr_info = {
4475 .name = "CIM_HOST_INT_CAUSE",
4476 .cause_reg = A_CIM_HOST_INT_CAUSE,
4477 .enable_reg = A_CIM_HOST_INT_ENABLE,
4478 .fatal = 0x007fffe6,
4479 .flags = NONFATAL_IF_DISABLED,
4480 .details = cim_host_intr_details,
4481 .actions = cim_host_intr_actions,
4483 static const struct intr_details cim_host_upacc_intr_details[] = {
4484 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
4485 { F_TIMEOUTMAINT, "CIM PIF MA timeout" },
4486 { F_TIMEOUTINT, "CIM PIF timeout" },
4487 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
4488 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
4489 { F_BLKWRPLINT, "CIM block write to PL space" },
4490 { F_BLKRDPLINT, "CIM block read from PL space" },
4492 "CIM single write to PL space with illegal BEs" },
4494 "CIM single read from PL space with illegal BEs" },
4495 { F_BLKWRCTLINT, "CIM block write to CTL space" },
4496 { F_BLKRDCTLINT, "CIM block read from CTL space" },
4498 "CIM single write to CTL space with illegal BEs" },
4500 "CIM single read from CTL space with illegal BEs" },
4501 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
4502 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
4504 "CIM single write to EEPROM space with illegal BEs" },
4506 "CIM single read from EEPROM space with illegal BEs" },
4507 { F_BLKWRFLASHINT, "CIM block write to flash space" },
4508 { F_BLKRDFLASHINT, "CIM block read from flash space" },
4509 { F_SGLWRFLASHINT, "CIM single write to flash space" },
4511 "CIM single read from flash space with illegal BEs" },
4512 { F_BLKWRBOOTINT, "CIM block write to boot space" },
4513 { F_BLKRDBOOTINT, "CIM block read from boot space" },
4514 { F_SGLWRBOOTINT, "CIM single write to boot space" },
4516 "CIM single read from boot space with illegal BEs" },
4517 { F_ILLWRBEINT, "CIM illegal write BEs" },
4518 { F_ILLRDBEINT, "CIM illegal read BEs" },
4519 { F_ILLRDINT, "CIM illegal read" },
4520 { F_ILLWRINT, "CIM illegal write" },
4521 { F_ILLTRANSINT, "CIM illegal transaction" },
4522 { F_RSVDSPACEINT, "CIM reserved space access" },
4525 static const struct intr_info cim_host_upacc_intr_info = {
4526 .name = "CIM_HOST_UPACC_INT_CAUSE",
4527 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
4528 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
4529 .fatal = 0x3fffeeff,
4530 .flags = NONFATAL_IF_DISABLED,
4531 .details = cim_host_upacc_intr_details,
4534 static const struct intr_info cim_pf_host_intr_info = {
4535 .name = "CIM_PF_HOST_INT_CAUSE",
4536 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4537 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
4546 fw_err = t4_read_reg(adap, A_PCIE_FW);
4547 if (fw_err & F_PCIE_FW_ERR)
4548 t4_report_fw_error(adap);
4551 * When the Firmware detects an internal error which normally wouldn't
4552 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4553 * to make sure the Host sees the Firmware Crash. So if we have a
4554 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4557 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
4558 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
4559 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
4560 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
4564 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
4565 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
4566 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
4572 * ULP RX interrupt handler.
4574 static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
4576 static const struct intr_details ulprx_intr_details[] = {
4578 { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
4579 { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },
4582 { F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
4583 { F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
4584 { 0x007fffff, "ULPRX parity error" },
4587 static const struct intr_info ulprx_intr_info = {
4588 .name = "ULP_RX_INT_CAUSE",
4589 .cause_reg = A_ULP_RX_INT_CAUSE,
4590 .enable_reg = A_ULP_RX_INT_ENABLE,
4591 .fatal = 0x07ffffff,
4592 .flags = NONFATAL_IF_DISABLED,
4593 .details = ulprx_intr_details,
4596 static const struct intr_info ulprx_intr2_info = {
4597 .name = "ULP_RX_INT_CAUSE_2",
4598 .cause_reg = A_ULP_RX_INT_CAUSE_2,
4599 .enable_reg = A_ULP_RX_INT_ENABLE_2,
4607 fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
4608 fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
4614 * ULP TX interrupt handler.
4616 static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
4618 static const struct intr_details ulptx_intr_details[] = {
4619 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
4620 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
4621 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
4622 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
4623 { 0x0fffffff, "ULPTX parity error" },
4626 static const struct intr_info ulptx_intr_info = {
4627 .name = "ULP_TX_INT_CAUSE",
4628 .cause_reg = A_ULP_TX_INT_CAUSE,
4629 .enable_reg = A_ULP_TX_INT_ENABLE,
4630 .fatal = 0x0fffffff,
4631 .flags = NONFATAL_IF_DISABLED,
4632 .details = ulptx_intr_details,
4635 static const struct intr_info ulptx_intr2_info = {
4636 .name = "ULP_TX_INT_CAUSE_2",
4637 .cause_reg = A_ULP_TX_INT_CAUSE_2,
4638 .enable_reg = A_ULP_TX_INT_ENABLE_2,
4640 .flags = NONFATAL_IF_DISABLED,
4646 fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
4647 fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
4652 static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
4657 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
4658 ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
4659 for (i = 0; i < ARRAY_SIZE(data); i++) {
4660 CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
4661 A_PM_TX_DBG_STAT0 + i, data[i]);
4668 * PM TX interrupt handler.
4670 static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
4672 static const struct intr_action pmtx_intr_actions[] = {
4673 { 0xffffffff, 0, pmtx_dump_dbg_stats },
4676 static const struct intr_details pmtx_intr_details[] = {
4677 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
4678 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
4679 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
4680 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
4681 { 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
4682 { 0x00f00000, "PMTX icspi FIFO Rx framing error" },
4683 { 0x000f0000, "PMTX icspi FIFO Tx framing error" },
4684 { 0x0000f000, "PMTX oespi FIFO Rx framing error" },
4685 { 0x00000f00, "PMTX oespi FIFO Tx framing error" },
4686 { 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
4687 { F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
4688 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
4689 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
4690 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
4693 static const struct intr_info pmtx_intr_info = {
4694 .name = "PM_TX_INT_CAUSE",
4695 .cause_reg = A_PM_TX_INT_CAUSE,
4696 .enable_reg = A_PM_TX_INT_ENABLE,
4697 .fatal = 0xffffffff,
4699 .details = pmtx_intr_details,
4700 .actions = pmtx_intr_actions,
4703 return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
4707 * PM RX interrupt handler.
4709 static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
4711 static const struct intr_details pmrx_intr_details[] = {
4713 { 0x18000000, "PMRX ospi overflow" },
4714 { F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
4715 { F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
4716 { F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
4717 { F_SDC_ERR, "PMRX SDC error" },
4720 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
4721 { 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
4722 { 0x0003c000, "PMRX iespi Rx framing error" },
4723 { 0x00003c00, "PMRX iespi Tx framing error" },
4724 { 0x00000300, "PMRX ocspi Rx framing error" },
4725 { 0x000000c0, "PMRX ocspi Tx framing error" },
4726 { 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
4727 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
4728 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
4729 { F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
4730 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
4733 static const struct intr_info pmrx_intr_info = {
4734 .name = "PM_RX_INT_CAUSE",
4735 .cause_reg = A_PM_RX_INT_CAUSE,
4736 .enable_reg = A_PM_RX_INT_ENABLE,
4737 .fatal = 0x1fffffff,
4738 .flags = NONFATAL_IF_DISABLED,
4739 .details = pmrx_intr_details,
4743 return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
4747 * CPL switch interrupt handler.
4749 static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
4751 static const struct intr_details cplsw_intr_details[] = {
4753 { F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
4754 { F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
4757 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
4758 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
4759 { F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
4760 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
4761 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
4762 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
4765 static const struct intr_info cplsw_intr_info = {
4766 .name = "CPL_INTR_CAUSE",
4767 .cause_reg = A_CPL_INTR_CAUSE,
4768 .enable_reg = A_CPL_INTR_ENABLE,
4770 .flags = NONFATAL_IF_DISABLED,
4771 .details = cplsw_intr_details,
4775 return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
4778 #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
4779 #define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
4780 #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
4781 F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
4782 F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
4783 F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
4784 #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
4785 F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
4786 F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
4789 * LE interrupt handler.
4791 static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
4793 static const struct intr_details le_intr_details[] = {
4794 { F_REQQPARERR, "LE request queue parity error" },
4795 { F_UNKNOWNCMD, "LE unknown command" },
4796 { F_ACTRGNFULL, "LE active region full" },
4797 { F_PARITYERR, "LE parity error" },
4798 { F_LIPMISS, "LE LIP miss" },
4799 { F_LIP0, "LE 0 LIP error" },
4802 static const struct intr_details t6_le_intr_details[] = {
4803 { F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
4804 { F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
4805 { F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
4806 { F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
4807 { F_TOTCNTERR, "LE total active < TCAM count" },
4808 { F_CMDPRSRINTERR, "LE internal error in parser" },
4809 { F_CMDTIDERR, "Incorrect tid in LE command" },
4810 { F_T6_ACTRGNFULL, "LE active region full" },
4811 { F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
4812 { F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
4813 { F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
4814 { F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
4815 { F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
4816 { F_TCAMACCFAIL, "LE TCAM access failure" },
4817 { F_T6_UNKNOWNCMD, "LE unknown command" },
4818 { F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
4819 { F_T6_LIPMISS, "LE CLIP lookup miss" },
4820 { T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
4823 struct intr_info le_intr_info = {
4824 .name = "LE_DB_INT_CAUSE",
4825 .cause_reg = A_LE_DB_INT_CAUSE,
4826 .enable_reg = A_LE_DB_INT_ENABLE,
4828 .flags = NONFATAL_IF_DISABLED,
4833 if (chip_id(adap) <= CHELSIO_T5) {
4834 le_intr_info.details = le_intr_details;
4835 le_intr_info.fatal = T5_LE_FATAL_MASK;
4837 le_intr_info.details = t6_le_intr_details;
4838 le_intr_info.fatal = T6_LE_FATAL_MASK;
4841 return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
4845 * MPS interrupt handler.
4847 static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
4849 static const struct intr_details mps_rx_perr_intr_details[] = {
4850 { 0xffffffff, "MPS Rx parity error" },
4853 static const struct intr_info mps_rx_perr_intr_info = {
4854 .name = "MPS_RX_PERR_INT_CAUSE",
4855 .cause_reg = A_MPS_RX_PERR_INT_CAUSE,
4856 .enable_reg = A_MPS_RX_PERR_INT_ENABLE,
4857 .fatal = 0xffffffff,
4858 .flags = NONFATAL_IF_DISABLED,
4859 .details = mps_rx_perr_intr_details,
4862 static const struct intr_details mps_tx_intr_details[] = {
4863 { F_PORTERR, "MPS Tx destination port is disabled" },
4864 { F_FRMERR, "MPS Tx framing error" },
4865 { F_SECNTERR, "MPS Tx SOP/EOP error" },
4866 { F_BUBBLE, "MPS Tx underflow" },
4867 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
4868 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
4869 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
4870 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
4873 static const struct intr_info mps_tx_intr_info = {
4874 .name = "MPS_TX_INT_CAUSE",
4875 .cause_reg = A_MPS_TX_INT_CAUSE,
4876 .enable_reg = A_MPS_TX_INT_ENABLE,
4878 .flags = NONFATAL_IF_DISABLED,
4879 .details = mps_tx_intr_details,
4882 static const struct intr_details mps_trc_intr_details[] = {
4883 { F_MISCPERR, "MPS TRC misc parity error" },
4884 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
4885 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
4888 static const struct intr_info mps_trc_intr_info = {
4889 .name = "MPS_TRC_INT_CAUSE",
4890 .cause_reg = A_MPS_TRC_INT_CAUSE,
4891 .enable_reg = A_MPS_TRC_INT_ENABLE,
4892 .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
4894 .details = mps_trc_intr_details,
4897 static const struct intr_details mps_stat_sram_intr_details[] = {
4898 { 0xffffffff, "MPS statistics SRAM parity error" },
4901 static const struct intr_info mps_stat_sram_intr_info = {
4902 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
4903 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4904 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
4905 .fatal = 0x1fffffff,
4906 .flags = NONFATAL_IF_DISABLED,
4907 .details = mps_stat_sram_intr_details,
4910 static const struct intr_details mps_stat_tx_intr_details[] = {
4911 { 0xffffff, "MPS statistics Tx FIFO parity error" },
4914 static const struct intr_info mps_stat_tx_intr_info = {
4915 .name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
4916 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4917 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
4919 .flags = NONFATAL_IF_DISABLED,
4920 .details = mps_stat_tx_intr_details,
4923 static const struct intr_details mps_stat_rx_intr_details[] = {
4924 { 0xffffff, "MPS statistics Rx FIFO parity error" },
4927 static const struct intr_info mps_stat_rx_intr_info = {
4928 .name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
4929 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4930 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
4933 .details = mps_stat_rx_intr_details,
4936 static const struct intr_details mps_cls_intr_details[] = {
4937 { F_HASHSRAM, "MPS hash SRAM parity error" },
4938 { F_MATCHTCAM, "MPS match TCAM parity error" },
4939 { F_MATCHSRAM, "MPS match SRAM parity error" },
4942 static const struct intr_info mps_cls_intr_info = {
4943 .name = "MPS_CLS_INT_CAUSE",
4944 .cause_reg = A_MPS_CLS_INT_CAUSE,
4945 .enable_reg = A_MPS_CLS_INT_ENABLE,
4946 .fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
4948 .details = mps_cls_intr_details,
4951 static const struct intr_details mps_stat_sram1_intr_details[] = {
4952 { 0xff, "MPS statistics SRAM1 parity error" },
4955 static const struct intr_info mps_stat_sram1_intr_info = {
4956 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
4957 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
4958 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
4961 .details = mps_stat_sram1_intr_details,
4968 fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
4969 fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
4970 fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
4971 fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
4972 fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
4973 fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
4974 fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
4975 if (chip_id(adap) > CHELSIO_T4) {
4976 fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
4980 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
4981 t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */
4988 * EDC/MC interrupt handler.
4990 static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
4992 static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
4993 unsigned int count_reg, v;
4994 static const struct intr_details mem_intr_details[] = {
4995 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
4996 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
4997 { F_PERR_INT_CAUSE, "FIFO parity error" },
5000 struct intr_info ii = {
5001 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
5002 .details = mem_intr_details,
5010 ii.name = "EDC0_INT_CAUSE";
5011 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
5012 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
5013 count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
5016 ii.name = "EDC1_INT_CAUSE";
5017 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
5018 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
5019 count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
5022 ii.name = "MC0_INT_CAUSE";
5024 ii.cause_reg = A_MC_INT_CAUSE;
5025 ii.enable_reg = A_MC_INT_ENABLE;
5026 count_reg = A_MC_ECC_STATUS;
5028 ii.cause_reg = A_MC_P_INT_CAUSE;
5029 ii.enable_reg = A_MC_P_INT_ENABLE;
5030 count_reg = A_MC_P_ECC_STATUS;
5034 ii.name = "MC1_INT_CAUSE";
5035 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
5036 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
5037 count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
5041 fatal = t4_handle_intr(adap, &ii, 0, verbose);
5043 v = t4_read_reg(adap, count_reg);
5045 if (G_ECC_UECNT(v) != 0) {
5047 "%s: %u uncorrectable ECC data error(s)\n",
5048 name[idx], G_ECC_UECNT(v));
5050 if (G_ECC_CECNT(v) != 0) {
5051 if (idx <= MEM_EDC1)
5052 t4_edc_err_read(adap, idx);
5053 CH_WARN_RATELIMIT(adap,
5054 "%s: %u correctable ECC data error(s)\n",
5055 name[idx], G_ECC_CECNT(v));
5057 t4_write_reg(adap, count_reg, 0xffffffff);
5063 static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
5067 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
5069 "MA address wrap-around error by client %u to address %#x\n",
5070 G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
5071 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
5078 * MA interrupt handler.
5080 static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
5082 static const struct intr_action ma_intr_actions[] = {
5083 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
5086 static const struct intr_info ma_intr_info = {
5087 .name = "MA_INT_CAUSE",
5088 .cause_reg = A_MA_INT_CAUSE,
5089 .enable_reg = A_MA_INT_ENABLE,
5090 .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
5091 .flags = NONFATAL_IF_DISABLED,
5093 .actions = ma_intr_actions,
5095 static const struct intr_info ma_perr_status1 = {
5096 .name = "MA_PARITY_ERROR_STATUS1",
5097 .cause_reg = A_MA_PARITY_ERROR_STATUS1,
5098 .enable_reg = A_MA_PARITY_ERROR_ENABLE1,
5099 .fatal = 0xffffffff,
5104 static const struct intr_info ma_perr_status2 = {
5105 .name = "MA_PARITY_ERROR_STATUS2",
5106 .cause_reg = A_MA_PARITY_ERROR_STATUS2,
5107 .enable_reg = A_MA_PARITY_ERROR_ENABLE2,
5108 .fatal = 0xffffffff,
5116 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
5117 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
5118 if (chip_id(adap) > CHELSIO_T4)
5119 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
5125 * SMB interrupt handler.
5127 static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
5129 static const struct intr_details smb_intr_details[] = {
5130 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
5131 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
5132 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
5135 static const struct intr_info smb_intr_info = {
5136 .name = "SMB_INT_CAUSE",
5137 .cause_reg = A_SMB_INT_CAUSE,
5138 .enable_reg = A_SMB_INT_ENABLE,
5139 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
5141 .details = smb_intr_details,
5145 return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
5149 * NC-SI interrupt handler.
5151 static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
5153 static const struct intr_details ncsi_intr_details[] = {
5154 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
5155 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
5156 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
5157 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
5160 static const struct intr_info ncsi_intr_info = {
5161 .name = "NCSI_INT_CAUSE",
5162 .cause_reg = A_NCSI_INT_CAUSE,
5163 .enable_reg = A_NCSI_INT_ENABLE,
5164 .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
5165 F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
5167 .details = ncsi_intr_details,
5171 return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
5175 * MAC interrupt handler.
5177 static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
5179 static const struct intr_details mac_intr_details[] = {
5180 { F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
5181 { F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
5185 struct intr_info ii;
5189 snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
5191 ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5192 ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
5193 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
5195 ii.details = mac_intr_details;
5198 snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
5200 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5201 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
5202 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
5204 ii.details = mac_intr_details;
5207 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5209 if (chip_id(adap) >= CHELSIO_T5) {
5210 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
5212 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
5213 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
5218 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5221 if (chip_id(adap) >= CHELSIO_T6) {
5222 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
5224 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
5225 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
5230 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5236 static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
5238 static const struct intr_details plpl_intr_details[] = {
5239 { F_FATALPERR, "Fatal parity error" },
5240 { F_PERRVFID, "VFID_MAP parity error" },
5243 static const struct intr_info plpl_intr_info = {
5244 .name = "PL_PL_INT_CAUSE",
5245 .cause_reg = A_PL_PL_INT_CAUSE,
5246 .enable_reg = A_PL_PL_INT_ENABLE,
5247 .fatal = F_FATALPERR | F_PERRVFID,
5248 .flags = NONFATAL_IF_DISABLED,
5249 .details = plpl_intr_details,
5253 return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
5257 * t4_slow_intr_handler - control path interrupt handler
5258 * @adap: the adapter
5259 * @verbose: increased verbosity, for debug
5261 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5262 * The designation 'slow' is because it involves register reads, while
5263 * data interrupts typically don't involve any MMIOs.
5265 int t4_slow_intr_handler(struct adapter *adap, bool verbose)
5267 static const struct intr_details pl_intr_details[] = {
5270 { F_ULP_TX, "ULP TX" },
5273 { F_CPL_SWITCH, "CPL Switch" },
5274 { F_ULP_RX, "ULP RX" },
5275 { F_PM_RX, "PM RX" },
5276 { F_PM_TX, "PM TX" },
5292 { F_NCSI, "NC-SI" },
5300 static const struct intr_info pl_perr_cause = {
5301 .name = "PL_PERR_CAUSE",
5302 .cause_reg = A_PL_PERR_CAUSE,
5303 .enable_reg = A_PL_PERR_ENABLE,
5304 .fatal = 0xffffffff,
5306 .details = pl_intr_details,
5309 static const struct intr_action pl_intr_action[] = {
5310 { F_MC1, MEM_MC1, mem_intr_handler },
5311 { F_ULP_TX, -1, ulptx_intr_handler },
5312 { F_SGE, -1, sge_intr_handler },
5313 { F_CPL_SWITCH, -1, cplsw_intr_handler },
5314 { F_ULP_RX, -1, ulprx_intr_handler },
5315 { F_PM_RX, -1, pmrx_intr_handler},
5316 { F_PM_TX, -1, pmtx_intr_handler},
5317 { F_MA, -1, ma_intr_handler },
5318 { F_TP, -1, tp_intr_handler },
5319 { F_LE, -1, le_intr_handler },
5320 { F_EDC1, MEM_EDC1, mem_intr_handler },
5321 { F_EDC0, MEM_EDC0, mem_intr_handler },
5322 { F_MC0, MEM_MC0, mem_intr_handler },
5323 { F_PCIE, -1, pcie_intr_handler },
5324 { F_MAC3, 3, mac_intr_handler},
5325 { F_MAC2, 2, mac_intr_handler},
5326 { F_MAC1, 1, mac_intr_handler},
5327 { F_MAC0, 0, mac_intr_handler},
5328 { F_SMB, -1, smb_intr_handler},
5329 { F_PL, -1, plpl_intr_handler },
5330 { F_NCSI, -1, ncsi_intr_handler},
5331 { F_MPS, -1, mps_intr_handler },
5332 { F_CIM, -1, cim_intr_handler },
5335 static const struct intr_info pl_intr_info = {
5336 .name = "PL_INT_CAUSE",
5337 .cause_reg = A_PL_INT_CAUSE,
5338 .enable_reg = A_PL_INT_ENABLE,
5341 .details = pl_intr_details,
5342 .actions = pl_intr_action,
5347 perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
5348 if (verbose || perr != 0) {
5349 t4_show_intr_info(adap, &pl_perr_cause, perr);
5351 t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
5353 perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
5355 fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
5357 t4_fatal_err(adap, false);
5362 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5365 * t4_intr_enable - enable interrupts
5366 * @adapter: the adapter whose interrupts should be enabled
5368 * Enable PF-specific interrupts for the calling function and the top-level
5369 * interrupt concentrator for global interrupts. Interrupts are already
5370 * enabled at each module, here we just enable the roots of the interrupt
5373 * Note: this function should be called only when the driver manages
5374 * non PF-specific interrupts from the various HW modules. Only one PCI
5375 * function at a time should be doing this.
5377 void t4_intr_enable(struct adapter *adap)
5381 if (chip_id(adap) <= CHELSIO_T5)
5382 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5384 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5385 val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
5386 F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
5387 F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
5388 F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5389 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
5391 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
5392 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5393 t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
5394 t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
5398 * t4_intr_disable - disable interrupts
5399 * @adap: the adapter whose interrupts should be disabled
5401 * Disable interrupts. We only disable the top-level interrupt
5402 * concentrators. The caller must be a PCI function managing global
5405 void t4_intr_disable(struct adapter *adap)
5408 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5409 t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
5413 * t4_intr_clear - clear all interrupts
5414 * @adap: the adapter whose interrupts should be cleared
5416 * Clears all interrupts. The caller must be a PCI function managing
5417 * global interrupts.
5419 void t4_intr_clear(struct adapter *adap)
5421 static const u32 cause_reg[] = {
5422 A_CIM_HOST_INT_CAUSE,
5423 A_CIM_HOST_UPACC_INT_CAUSE,
5424 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
5426 EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
5428 A_MA_INT_WRAP_STATUS,
5429 A_MA_PARITY_ERROR_STATUS1,
5431 A_MPS_CLS_INT_CAUSE,
5432 A_MPS_RX_PERR_INT_CAUSE,
5433 A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5434 A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5435 A_MPS_TRC_INT_CAUSE,
5437 A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5451 A_ULP_RX_INT_CAUSE_2,
5453 A_ULP_TX_INT_CAUSE_2,
5455 MYPF_REG(A_PL_PF_INT_CAUSE),
5458 const int nchan = adap->chip_params->nchan;
5460 for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
5461 t4_write_reg(adap, cause_reg[i], 0xffffffff);
5464 t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
5466 t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
5468 t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
5469 for (i = 0; i < nchan; i++) {
5470 t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
5474 if (chip_id(adap) >= CHELSIO_T5) {
5475 t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
5476 t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
5477 t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
5478 t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
5480 t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
5483 for (i = 0; i < nchan; i++) {
5484 t4_write_reg(adap, T5_PORT_REG(i,
5485 A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
5486 if (chip_id(adap) > CHELSIO_T5) {
5487 t4_write_reg(adap, T5_PORT_REG(i,
5488 A_MAC_PORT_PERR_INT_CAUSE_100G),
5491 t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
5495 if (chip_id(adap) >= CHELSIO_T6) {
5496 t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
5499 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
5500 t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
5501 t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
5502 (void) t4_read_reg(adap, A_PL_INT_CAUSE); /* flush */
5506 * hash_mac_addr - return the hash value of a MAC address
5507 * @addr: the 48-bit Ethernet MAC address
5509 * Hashes a MAC address according to the hash function used by HW inexact
5510 * (hash) address matching.
5512 static int hash_mac_addr(const u8 *addr)
5514 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
5515 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
5523 * t4_config_rss_range - configure a portion of the RSS mapping table
5524 * @adapter: the adapter
5525 * @mbox: mbox to use for the FW command
5526 * @viid: virtual interface whose RSS subtable is to be written
5527 * @start: start entry in the table to write
5528 * @n: how many table entries to write
5529 * @rspq: values for the "response queue" (Ingress Queue) lookup table
5530 * @nrspq: number of values in @rspq
5532 * Programs the selected part of the VI's RSS mapping table with the
5533 * provided values. If @nrspq < @n the supplied values are used repeatedly
5534 * until the full table range is populated.
5536 * The caller must ensure the values in @rspq are in the range allowed for
5539 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5540 int start, int n, const u16 *rspq, unsigned int nrspq)
5543 const u16 *rsp = rspq;
5544 const u16 *rsp_end = rspq + nrspq;
5545 struct fw_rss_ind_tbl_cmd cmd;
5547 memset(&cmd, 0, sizeof(cmd));
5548 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5549 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5550 V_FW_RSS_IND_TBL_CMD_VIID(viid));
5551 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5554 * Each firmware RSS command can accommodate up to 32 RSS Ingress
5555 * Queue Identifiers. These Ingress Queue IDs are packed three to
5556 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5560 int nq = min(n, 32);
5562 __be32 *qp = &cmd.iq0_to_iq2;
5565 * Set up the firmware RSS command header to send the next
5566 * "nq" Ingress Queue IDs to the firmware.
5568 cmd.niqid = cpu_to_be16(nq);
5569 cmd.startidx = cpu_to_be16(start);
5572 * "nq" more done for the start of the next loop.
5578 * While there are still Ingress Queue IDs to stuff into the
5579 * current firmware RSS command, retrieve them from the
5580 * Ingress Queue ID array and insert them into the command.
5584 * Grab up to the next 3 Ingress Queue IDs (wrapping
5585 * around the Ingress Queue ID array if necessary) and
5586 * insert them into the firmware RSS command at the
5587 * current 3-tuple position within the commad.
5591 int nqbuf = min(3, nq);
5594 qbuf[0] = qbuf[1] = qbuf[2] = 0;
5595 while (nqbuf && nq_packed < 32) {
5602 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5603 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5604 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5608 * Send this portion of the RRS table update to the firmware;
5609 * bail out on any errors.
5611 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5619 * t4_config_glbl_rss - configure the global RSS mode
5620 * @adapter: the adapter
5621 * @mbox: mbox to use for the FW command
5622 * @mode: global RSS mode
5623 * @flags: mode-specific flags
5625 * Sets the global RSS mode.
5627 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5630 struct fw_rss_glb_config_cmd c;
5632 memset(&c, 0, sizeof(c));
5633 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5634 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5635 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5636 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5637 c.u.manual.mode_pkd =
5638 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5639 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5640 c.u.basicvirtual.mode_keymode =
5641 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5642 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5645 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5649 * t4_config_vi_rss - configure per VI RSS settings
5650 * @adapter: the adapter
5651 * @mbox: mbox to use for the FW command
5654 * @defq: id of the default RSS queue for the VI.
5655 * @skeyidx: RSS secret key table index for non-global mode
5656 * @skey: RSS vf_scramble key for VI.
5658 * Configures VI-specific RSS properties.
5660 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5661 unsigned int flags, unsigned int defq, unsigned int skeyidx,
5664 struct fw_rss_vi_config_cmd c;
5666 memset(&c, 0, sizeof(c));
5667 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5668 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5669 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5670 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5671 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5672 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5673 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5674 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5675 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5677 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5680 /* Read an RSS table row */
5681 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5683 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
5684 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
5689 * t4_read_rss - read the contents of the RSS mapping table
5690 * @adapter: the adapter
5691 * @map: holds the contents of the RSS mapping table
5693 * Reads the contents of the RSS hash->queue mapping table.
5695 int t4_read_rss(struct adapter *adapter, u16 *map)
5699 int rss_nentries = adapter->chip_params->rss_nentries;
5701 for (i = 0; i < rss_nentries / 2; ++i) {
5702 ret = rd_rss_row(adapter, i, &val);
5705 *map++ = G_LKPTBLQUEUE0(val);
5706 *map++ = G_LKPTBLQUEUE1(val);
5712 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5713 * @adap: the adapter
5714 * @cmd: TP fw ldst address space type
5715 * @vals: where the indirect register values are stored/written
5716 * @nregs: how many indirect registers to read/write
5717 * @start_idx: index of first indirect register to read/write
5718 * @rw: Read (1) or Write (0)
5719 * @sleep_ok: if true we may sleep while awaiting command completion
5721 * Access TP indirect registers through LDST
5723 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5724 unsigned int nregs, unsigned int start_index,
5725 unsigned int rw, bool sleep_ok)
5729 struct fw_ldst_cmd c;
5731 for (i = 0; i < nregs; i++) {
5732 memset(&c, 0, sizeof(c));
5733 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5735 (rw ? F_FW_CMD_READ :
5737 V_FW_LDST_CMD_ADDRSPACE(cmd));
5738 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5740 c.u.addrval.addr = cpu_to_be32(start_index + i);
5741 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5742 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5748 vals[i] = be32_to_cpu(c.u.addrval.val);
5754 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5755 * @adap: the adapter
5756 * @reg_addr: Address Register
5757 * @reg_data: Data register
5758 * @buff: where the indirect register values are stored/written
5759 * @nregs: how many indirect registers to read/write
5760 * @start_index: index of first indirect register to read/write
5761 * @rw: READ(1) or WRITE(0)
5762 * @sleep_ok: if true we may sleep while awaiting command completion
5764 * Read/Write TP indirect registers through LDST if possible.
5765 * Else, use backdoor access
5767 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5768 u32 *buff, u32 nregs, u32 start_index, int rw,
5776 cmd = FW_LDST_ADDRSPC_TP_PIO;
5778 case A_TP_TM_PIO_ADDR:
5779 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5781 case A_TP_MIB_INDEX:
5782 cmd = FW_LDST_ADDRSPC_TP_MIB;
5785 goto indirect_access;
5788 if (t4_use_ldst(adap))
5789 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5796 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5799 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5805 * t4_tp_pio_read - Read TP PIO registers
5806 * @adap: the adapter
5807 * @buff: where the indirect register values are written
5808 * @nregs: how many indirect registers to read
5809 * @start_index: index of first indirect register to read
5810 * @sleep_ok: if true we may sleep while awaiting command completion
5812 * Read TP PIO Registers
5814 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5815 u32 start_index, bool sleep_ok)
5817 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5818 start_index, 1, sleep_ok);
5822 * t4_tp_pio_write - Write TP PIO registers
5823 * @adap: the adapter
5824 * @buff: where the indirect register values are stored
5825 * @nregs: how many indirect registers to write
5826 * @start_index: index of first indirect register to write
5827 * @sleep_ok: if true we may sleep while awaiting command completion
5829 * Write TP PIO Registers
5831 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5832 u32 start_index, bool sleep_ok)
5834 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5835 __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
5839 * t4_tp_tm_pio_read - Read TP TM PIO registers
5840 * @adap: the adapter
5841 * @buff: where the indirect register values are written
5842 * @nregs: how many indirect registers to read
5843 * @start_index: index of first indirect register to read
5844 * @sleep_ok: if true we may sleep while awaiting command completion
5846 * Read TP TM PIO Registers
5848 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5849 u32 start_index, bool sleep_ok)
5851 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5852 nregs, start_index, 1, sleep_ok);
5856 * t4_tp_mib_read - Read TP MIB registers
5857 * @adap: the adapter
5858 * @buff: where the indirect register values are written
5859 * @nregs: how many indirect registers to read
5860 * @start_index: index of first indirect register to read
5861 * @sleep_ok: if true we may sleep while awaiting command completion
5863 * Read TP MIB Registers
5865 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5868 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5869 start_index, 1, sleep_ok);
5873 * t4_read_rss_key - read the global RSS key
5874 * @adap: the adapter
5875 * @key: 10-entry array holding the 320-bit RSS key
5876 * @sleep_ok: if true we may sleep while awaiting command completion
5878 * Reads the global 320-bit RSS key.
5880 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5882 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5886 * t4_write_rss_key - program one of the RSS keys
5887 * @adap: the adapter
5888 * @key: 10-entry array holding the 320-bit RSS key
5889 * @idx: which RSS key to write
5890 * @sleep_ok: if true we may sleep while awaiting command completion
5892 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5893 * 0..15 the corresponding entry in the RSS key table is written,
5894 * otherwise the global RSS key is written.
5896 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5899 u8 rss_key_addr_cnt = 16;
5900 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5903 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5904 * allows access to key addresses 16-63 by using KeyWrAddrX
5905 * as index[5:4](upper 2) into key table
5907 if ((chip_id(adap) > CHELSIO_T5) &&
5908 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5909 rss_key_addr_cnt = 32;
5911 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5913 if (idx >= 0 && idx < rss_key_addr_cnt) {
5914 if (rss_key_addr_cnt > 16)
5915 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5916 vrt | V_KEYWRADDRX(idx >> 4) |
5917 V_T6_VFWRADDR(idx) | F_KEYWREN);
5919 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5920 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5925 * t4_read_rss_pf_config - read PF RSS Configuration Table
5926 * @adapter: the adapter
5927 * @index: the entry in the PF RSS table to read
5928 * @valp: where to store the returned value
5929 * @sleep_ok: if true we may sleep while awaiting command completion
5931 * Reads the PF RSS Configuration Table at the specified index and returns
5932 * the value found there.
5934 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5935 u32 *valp, bool sleep_ok)
5937 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5941 * t4_write_rss_pf_config - write PF RSS Configuration Table
5942 * @adapter: the adapter
5943 * @index: the entry in the VF RSS table to read
5944 * @val: the value to store
5945 * @sleep_ok: if true we may sleep while awaiting command completion
5947 * Writes the PF RSS Configuration Table at the specified index with the
5950 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5951 u32 val, bool sleep_ok)
5953 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5958 * t4_read_rss_vf_config - read VF RSS Configuration Table
5959 * @adapter: the adapter
5960 * @index: the entry in the VF RSS table to read
5961 * @vfl: where to store the returned VFL
5962 * @vfh: where to store the returned VFH
5963 * @sleep_ok: if true we may sleep while awaiting command completion
5965 * Reads the VF RSS Configuration Table at the specified index and returns
5966 * the (VFL, VFH) values found there.
5968 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5969 u32 *vfl, u32 *vfh, bool sleep_ok)
5971 u32 vrt, mask, data;
5973 if (chip_id(adapter) <= CHELSIO_T5) {
5974 mask = V_VFWRADDR(M_VFWRADDR);
5975 data = V_VFWRADDR(index);
5977 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5978 data = V_T6_VFWRADDR(index);
5981 * Request that the index'th VF Table values be read into VFL/VFH.
5983 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5984 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5985 vrt |= data | F_VFRDEN;
5986 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5989 * Grab the VFL/VFH values ...
5991 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5992 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5996 * t4_write_rss_vf_config - write VF RSS Configuration Table
5998 * @adapter: the adapter
5999 * @index: the entry in the VF RSS table to write
6000 * @vfl: the VFL to store
6001 * @vfh: the VFH to store
6003 * Writes the VF RSS Configuration Table at the specified index with the
6004 * specified (VFL, VFH) values.
6006 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
6007 u32 vfl, u32 vfh, bool sleep_ok)
6009 u32 vrt, mask, data;
6011 if (chip_id(adapter) <= CHELSIO_T5) {
6012 mask = V_VFWRADDR(M_VFWRADDR);
6013 data = V_VFWRADDR(index);
6015 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
6016 data = V_T6_VFWRADDR(index);
6020 * Load up VFL/VFH with the values to be written ...
6022 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
6023 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
6026 * Write the VFL/VFH into the VF Table at index'th location.
6028 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
6029 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
6030 vrt |= data | F_VFRDEN;
6031 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
6035 * t4_read_rss_pf_map - read PF RSS Map
6036 * @adapter: the adapter
6037 * @sleep_ok: if true we may sleep while awaiting command completion
6039 * Reads the PF RSS Map register and returns its value.
6041 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
6045 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6051 * t4_write_rss_pf_map - write PF RSS Map
6052 * @adapter: the adapter
6053 * @pfmap: PF RSS Map value
6055 * Writes the specified value to the PF RSS Map register.
6057 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
6059 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6063 * t4_read_rss_pf_mask - read PF RSS Mask
6064 * @adapter: the adapter
6065 * @sleep_ok: if true we may sleep while awaiting command completion
6067 * Reads the PF RSS Mask register and returns its value.
6069 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6073 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6079 * t4_write_rss_pf_mask - write PF RSS Mask
6080 * @adapter: the adapter
6081 * @pfmask: PF RSS Mask value
6083 * Writes the specified value to the PF RSS Mask register.
6085 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
6087 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6091 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
6092 * @adap: the adapter
6093 * @v4: holds the TCP/IP counter values
6094 * @v6: holds the TCP/IPv6 counter values
6095 * @sleep_ok: if true we may sleep while awaiting command completion
6097 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6098 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6100 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
6101 struct tp_tcp_stats *v6, bool sleep_ok)
6103 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
6105 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
6106 #define STAT(x) val[STAT_IDX(x)]
6107 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
6110 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6111 A_TP_MIB_TCP_OUT_RST, sleep_ok);
6112 v4->tcp_out_rsts = STAT(OUT_RST);
6113 v4->tcp_in_segs = STAT64(IN_SEG);
6114 v4->tcp_out_segs = STAT64(OUT_SEG);
6115 v4->tcp_retrans_segs = STAT64(RXT_SEG);
6118 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6119 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
6120 v6->tcp_out_rsts = STAT(OUT_RST);
6121 v6->tcp_in_segs = STAT64(IN_SEG);
6122 v6->tcp_out_segs = STAT64(OUT_SEG);
6123 v6->tcp_retrans_segs = STAT64(RXT_SEG);
6131 * t4_tp_get_err_stats - read TP's error MIB counters
6132 * @adap: the adapter
6133 * @st: holds the counter values
6134 * @sleep_ok: if true we may sleep while awaiting command completion
6136 * Returns the values of TP's error counters.
6138 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
6141 int nchan = adap->chip_params->nchan;
6143 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
6146 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
6149 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
6152 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
6153 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
6155 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
6156 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
6158 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6161 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6162 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
6164 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6165 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6167 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6172 * t4_tp_get_err_stats - read TP's error MIB counters
6173 * @adap: the adapter
6174 * @st: holds the counter values
6175 * @sleep_ok: if true we may sleep while awaiting command completion
6177 * Returns the values of TP's error counters.
6179 void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
6182 int nchan = adap->chip_params->nchan;
6184 t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0,
6186 t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0,
6191 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
6192 * @adap: the adapter
6193 * @st: holds the counter values
6195 * Returns the values of TP's proxy counters.
6197 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
6200 int nchan = adap->chip_params->nchan;
6202 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
6206 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
6207 * @adap: the adapter
6208 * @st: holds the counter values
6209 * @sleep_ok: if true we may sleep while awaiting command completion
6211 * Returns the values of TP's CPL counters.
6213 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6216 int nchan = adap->chip_params->nchan;
6218 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6220 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6224 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6225 * @adap: the adapter
6226 * @st: holds the counter values
6228 * Returns the values of TP's RDMA counters.
6230 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6233 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6238 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6239 * @adap: the adapter
6240 * @idx: the port index
6241 * @st: holds the counter values
6242 * @sleep_ok: if true we may sleep while awaiting command completion
6244 * Returns the values of TP's FCoE counters for the selected port.
6246 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6247 struct tp_fcoe_stats *st, bool sleep_ok)
6251 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6254 t4_tp_mib_read(adap, &st->frames_drop, 1,
6255 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6257 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6260 st->octets_ddp = ((u64)val[0] << 32) | val[1];
6264 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6265 * @adap: the adapter
6266 * @st: holds the counter values
6267 * @sleep_ok: if true we may sleep while awaiting command completion
6269 * Returns the values of TP's counters for non-TCP directly-placed packets.
6271 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6276 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6278 st->frames = val[0];
6280 st->octets = ((u64)val[2] << 32) | val[3];
6284 * t4_tp_get_tid_stats - read TP's tid MIB counters.
6285 * @adap: the adapter
6286 * @st: holds the counter values
6287 * @sleep_ok: if true we may sleep while awaiting command completion
6289 * Returns the values of TP's counters for tids.
6291 void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
6295 t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok);
6299 * t4_read_mtu_tbl - returns the values in the HW path MTU table
6300 * @adap: the adapter
6301 * @mtus: where to store the MTU values
6302 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
6304 * Reads the HW path MTU table.
6306 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6311 for (i = 0; i < NMTUS; ++i) {
6312 t4_write_reg(adap, A_TP_MTU_TABLE,
6313 V_MTUINDEX(0xff) | V_MTUVALUE(i));
6314 v = t4_read_reg(adap, A_TP_MTU_TABLE);
6315 mtus[i] = G_MTUVALUE(v);
6317 mtu_log[i] = G_MTUWIDTH(v);
6322 * t4_read_cong_tbl - reads the congestion control table
6323 * @adap: the adapter
6324 * @incr: where to store the alpha values
6326 * Reads the additive increments programmed into the HW congestion
6329 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6331 unsigned int mtu, w;
6333 for (mtu = 0; mtu < NMTUS; ++mtu)
6334 for (w = 0; w < NCCTRL_WIN; ++w) {
6335 t4_write_reg(adap, A_TP_CCTRL_TABLE,
6336 V_ROWINDEX(0xffff) | (mtu << 5) | w);
6337 incr[mtu][w] = (u16)t4_read_reg(adap,
6338 A_TP_CCTRL_TABLE) & 0x1fff;
6343 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6344 * @adap: the adapter
6345 * @addr: the indirect TP register address
6346 * @mask: specifies the field within the register to modify
6347 * @val: new value for the field
6349 * Sets a field of an indirect TP register to the given value.
6351 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6352 unsigned int mask, unsigned int val)
6354 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6355 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6356 t4_write_reg(adap, A_TP_PIO_DATA, val);
6360 * init_cong_ctrl - initialize congestion control parameters
6361 * @a: the alpha values for congestion control
6362 * @b: the beta values for congestion control
6364 * Initialize the congestion control parameters.
6366 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
6368 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
6393 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
6396 b[13] = b[14] = b[15] = b[16] = 3;
6397 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
6398 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
6403 /* The minimum additive increment value for the congestion control table */
6404 #define CC_MIN_INCR 2U
6407 * t4_load_mtus - write the MTU and congestion control HW tables
6408 * @adap: the adapter
6409 * @mtus: the values for the MTU table
6410 * @alpha: the values for the congestion control alpha parameter
6411 * @beta: the values for the congestion control beta parameter
6413 * Write the HW MTU table with the supplied MTUs and the high-speed
6414 * congestion control table with the supplied alpha, beta, and MTUs.
6415 * We write the two tables together because the additive increments
6416 * depend on the MTUs.
6418 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6419 const unsigned short *alpha, const unsigned short *beta)
6421 static const unsigned int avg_pkts[NCCTRL_WIN] = {
6422 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
6423 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
6424 28672, 40960, 57344, 81920, 114688, 163840, 229376
6429 for (i = 0; i < NMTUS; ++i) {
6430 unsigned int mtu = mtus[i];
6431 unsigned int log2 = fls(mtu);
6433 if (!(mtu & ((1 << log2) >> 2))) /* round */
6435 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6436 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
6438 for (w = 0; w < NCCTRL_WIN; ++w) {
6441 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
6444 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6445 (w << 16) | (beta[w] << 13) | inc);
6451 * t4_set_pace_tbl - set the pace table
6452 * @adap: the adapter
6453 * @pace_vals: the pace values in microseconds
6454 * @start: index of the first entry in the HW pace table to set
6455 * @n: how many entries to set
6457 * Sets (a subset of the) HW pace table.
6459 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
6460 unsigned int start, unsigned int n)
6462 unsigned int vals[NTX_SCHED], i;
6463 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
6468 /* convert values from us to dack ticks, rounding to closest value */
6469 for (i = 0; i < n; i++, pace_vals++) {
6470 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
6471 if (vals[i] > 0x7ff)
6473 if (*pace_vals && vals[i] == 0)
6476 for (i = 0; i < n; i++, start++)
6477 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
6482 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
6483 * @adap: the adapter
6484 * @kbps: target rate in Kbps
6485 * @sched: the scheduler index
6487 * Configure a Tx HW scheduler for the target rate.
6489 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
6491 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
6492 unsigned int clk = adap->params.vpd.cclk * 1000;
6493 unsigned int selected_cpt = 0, selected_bpt = 0;
6496 kbps *= 125; /* -> bytes */
6497 for (cpt = 1; cpt <= 255; cpt++) {
6499 bpt = (kbps + tps / 2) / tps;
6500 if (bpt > 0 && bpt <= 255) {
6502 delta = v >= kbps ? v - kbps : kbps - v;
6503 if (delta < mindelta) {
6508 } else if (selected_cpt)
6514 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
6515 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
6516 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6518 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
6520 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
6521 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6526 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
6527 * @adap: the adapter
6528 * @sched: the scheduler index
6529 * @ipg: the interpacket delay in tenths of nanoseconds
6531 * Set the interpacket delay for a HW packet rate scheduler.
6533 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
6535 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
6537 /* convert ipg to nearest number of core clocks */
6538 ipg *= core_ticks_per_usec(adap);
6539 ipg = (ipg + 5000) / 10000;
6540 if (ipg > M_TXTIMERSEPQ0)
6543 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
6544 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6546 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
6548 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
6549 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6550 t4_read_reg(adap, A_TP_TM_PIO_DATA);
6555 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6556 * clocks. The formula is
6558 * bytes/s = bytes256 * 256 * ClkFreq / 4096
6560 * which is equivalent to
6562 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6564 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6566 u64 v = (u64)bytes256 * adap->params.vpd.cclk;
6568 return v * 62 + v / 2;
6572 * t4_get_chan_txrate - get the current per channel Tx rates
6573 * @adap: the adapter
6574 * @nic_rate: rates for NIC traffic
6575 * @ofld_rate: rates for offloaded traffic
6577 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
6580 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6584 v = t4_read_reg(adap, A_TP_TX_TRATE);
6585 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6586 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6587 if (adap->chip_params->nchan > 2) {
6588 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6589 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6592 v = t4_read_reg(adap, A_TP_TX_ORATE);
6593 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6594 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6595 if (adap->chip_params->nchan > 2) {
6596 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6597 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6602 * t4_set_trace_filter - configure one of the tracing filters
6603 * @adap: the adapter
6604 * @tp: the desired trace filter parameters
6605 * @idx: which filter to configure
6606 * @enable: whether to enable or disable the filter
6608 * Configures one of the tracing filters available in HW. If @tp is %NULL
6609 * it indicates that the filter is already written in the register and it
6610 * just needs to be enabled or disabled.
6612 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
6613 int idx, int enable)
6615 int i, ofst = idx * 4;
6616 u32 data_reg, mask_reg, cfg;
6617 u32 multitrc = F_TRCMULTIFILTER;
6618 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
6620 if (idx < 0 || idx >= NTRACE)
6623 if (tp == NULL || !enable) {
6624 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
6630 * TODO - After T4 data book is updated, specify the exact
6633 * See T4 data book - MPS section for a complete description
6634 * of the below if..else handling of A_MPS_TRC_CFG register
6637 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6638 if (cfg & F_TRCMULTIFILTER) {
6640 * If multiple tracers are enabled, then maximum
6641 * capture size is 2.5KB (FIFO size of a single channel)
6642 * minus 2 flits for CPL_TRACE_PKT header.
6644 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
6648 * If multiple tracers are disabled, to avoid deadlocks
6649 * maximum packet capture size of 9600 bytes is recommended.
6650 * Also in this mode, only trace0 can be enabled and running.
6653 if (tp->snap_len > 9600 || idx)
6657 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
6658 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
6659 tp->min_len > M_TFMINPKTSIZE)
6662 /* stop the tracer we'll be changing */
6663 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
6665 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
6666 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
6667 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
6669 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6670 t4_write_reg(adap, data_reg, tp->data[i]);
6671 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6673 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6674 V_TFCAPTUREMAX(tp->snap_len) |
6675 V_TFMINPKTSIZE(tp->min_len));
6676 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6677 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
6679 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
6680 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
6686 * t4_get_trace_filter - query one of the tracing filters
6687 * @adap: the adapter
6688 * @tp: the current trace filter parameters
6689 * @idx: which trace filter to query
6690 * @enabled: non-zero if the filter is enabled
6692 * Returns the current settings of one of the HW tracing filters.
6694 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6698 int i, ofst = idx * 4;
6699 u32 data_reg, mask_reg;
6701 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6702 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6705 *enabled = !!(ctla & F_TFEN);
6706 tp->port = G_TFPORT(ctla);
6707 tp->invert = !!(ctla & F_TFINVERTMATCH);
6709 *enabled = !!(ctla & F_T5_TFEN);
6710 tp->port = G_T5_TFPORT(ctla);
6711 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6713 tp->snap_len = G_TFCAPTUREMAX(ctlb);
6714 tp->min_len = G_TFMINPKTSIZE(ctlb);
6715 tp->skip_ofst = G_TFOFFSET(ctla);
6716 tp->skip_len = G_TFLENGTH(ctla);
6718 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6719 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6720 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6722 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6723 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6724 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6729 * t4_pmtx_get_stats - returns the HW stats from PMTX
6730 * @adap: the adapter
6731 * @cnt: where to store the count statistics
6732 * @cycles: where to store the cycle statistics
6734 * Returns performance statistics from PMTX.
6736 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6741 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6742 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6743 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6745 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6747 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6748 A_PM_TX_DBG_DATA, data, 2,
6749 A_PM_TX_DBG_STAT_MSB);
6750 cycles[i] = (((u64)data[0] << 32) | data[1]);
6756 * t4_pmrx_get_stats - returns the HW stats from PMRX
6757 * @adap: the adapter
6758 * @cnt: where to store the count statistics
6759 * @cycles: where to store the cycle statistics
6761 * Returns performance statistics from PMRX.
6763 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6768 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6769 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6770 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6772 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6774 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6775 A_PM_RX_DBG_DATA, data, 2,
6776 A_PM_RX_DBG_STAT_MSB);
6777 cycles[i] = (((u64)data[0] << 32) | data[1]);
6783 * t4_get_mps_bg_map - return the buffer groups associated with a port
6784 * @adap: the adapter
6785 * @idx: the port index
6787 * Returns a bitmap indicating which MPS buffer groups are associated
6788 * with the given port. Bit i is set if buffer group i is used by the
6791 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
6795 if (adap->params.mps_bg_map)
6796 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6798 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6800 return idx == 0 ? 0xf : 0;
6801 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6802 return idx < 2 ? (3 << (2 * idx)) : 0;
6807 * TP RX e-channels associated with the port.
6809 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6811 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6812 const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
6815 return idx == 0 ? all_chan : 0;
6816 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6817 return idx < 2 ? (3 << (2 * idx)) : 0;
6822 * t4_get_port_type_description - return Port Type string description
6823 * @port_type: firmware Port Type enumeration
6825 const char *t4_get_port_type_description(enum fw_port_type port_type)
6827 static const char *const port_type_description[] = {
6852 if (port_type < ARRAY_SIZE(port_type_description))
6853 return port_type_description[port_type];
6858 * t4_get_port_stats_offset - collect port stats relative to a previous
6860 * @adap: The adapter
6862 * @stats: Current stats to fill
6863 * @offset: Previous stats snapshot
6865 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6866 struct port_stats *stats,
6867 struct port_stats *offset)
6872 t4_get_port_stats(adap, idx, stats);
6873 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6874 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6880 * t4_get_port_stats - collect port statistics
6881 * @adap: the adapter
6882 * @idx: the port index
6883 * @p: the stats structure to fill
6885 * Collect statistics related to the given port from HW.
6887 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6889 struct port_info *pi = adap->port[idx];
6890 u32 bgmap = pi->mps_bg_map;
6891 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6893 #define GET_STAT(name) \
6894 t4_read_reg64(adap, \
6895 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6896 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6897 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6899 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6900 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6901 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6902 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6903 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6904 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6905 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6906 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6907 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6908 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6909 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6910 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6911 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6912 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6913 p->tx_drop = GET_STAT(TX_PORT_DROP);
6914 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6915 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6916 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6917 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6918 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6919 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6920 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6921 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6923 if (chip_id(adap) >= CHELSIO_T5) {
6924 if (stat_ctl & F_COUNTPAUSESTATTX) {
6925 p->tx_frames -= p->tx_pause;
6926 p->tx_octets -= p->tx_pause * 64;
6928 if (stat_ctl & F_COUNTPAUSEMCTX)
6929 p->tx_mcast_frames -= p->tx_pause;
6932 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6933 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6934 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6935 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6936 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6937 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6938 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6939 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6940 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6941 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6942 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6943 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6944 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6945 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6946 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6947 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6948 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6949 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6950 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6951 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6952 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6953 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6954 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6955 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6956 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6957 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6959 if (pi->fcs_reg != -1)
6960 p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
6962 if (chip_id(adap) >= CHELSIO_T5) {
6963 if (stat_ctl & F_COUNTPAUSESTATRX) {
6964 p->rx_frames -= p->rx_pause;
6965 p->rx_octets -= p->rx_pause * 64;
6967 if (stat_ctl & F_COUNTPAUSEMCRX)
6968 p->rx_mcast_frames -= p->rx_pause;
6971 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6972 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6973 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6974 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6975 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6976 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6977 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6978 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6985 * t4_get_lb_stats - collect loopback port statistics
6986 * @adap: the adapter
6987 * @idx: the loopback port index
6988 * @p: the stats structure to fill
6990 * Return HW statistics for the given loopback port.
6992 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6995 #define GET_STAT(name) \
6996 t4_read_reg64(adap, \
6998 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6999 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
7000 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
7002 p->octets = GET_STAT(BYTES);
7003 p->frames = GET_STAT(FRAMES);
7004 p->bcast_frames = GET_STAT(BCAST);
7005 p->mcast_frames = GET_STAT(MCAST);
7006 p->ucast_frames = GET_STAT(UCAST);
7007 p->error_frames = GET_STAT(ERROR);
7009 p->frames_64 = GET_STAT(64B);
7010 p->frames_65_127 = GET_STAT(65B_127B);
7011 p->frames_128_255 = GET_STAT(128B_255B);
7012 p->frames_256_511 = GET_STAT(256B_511B);
7013 p->frames_512_1023 = GET_STAT(512B_1023B);
7014 p->frames_1024_1518 = GET_STAT(1024B_1518B);
7015 p->frames_1519_max = GET_STAT(1519B_MAX);
7016 p->drop = GET_STAT(DROP_FRAMES);
7018 if (idx < adap->params.nports) {
7019 u32 bg = adap2pinfo(adap, idx)->mps_bg_map;
7021 p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
7022 p->ovflow1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
7023 p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
7024 p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
7025 p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
7026 p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
7027 p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
7028 p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
7036 * t4_wol_magic_enable - enable/disable magic packet WoL
7037 * @adap: the adapter
7038 * @port: the physical port index
7039 * @addr: MAC address expected in magic packets, %NULL to disable
7041 * Enables/disables magic packet wake-on-LAN for the selected port.
7043 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
7046 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
7049 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
7050 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
7051 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
7053 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
7054 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
7055 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
7059 t4_write_reg(adap, mag_id_reg_l,
7060 (addr[2] << 24) | (addr[3] << 16) |
7061 (addr[4] << 8) | addr[5]);
7062 t4_write_reg(adap, mag_id_reg_h,
7063 (addr[0] << 8) | addr[1]);
7065 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
7066 V_MAGICEN(addr != NULL));
7070 * t4_wol_pat_enable - enable/disable pattern-based WoL
7071 * @adap: the adapter
7072 * @port: the physical port index
7073 * @map: bitmap of which HW pattern filters to set
7074 * @mask0: byte mask for bytes 0-63 of a packet
7075 * @mask1: byte mask for bytes 64-127 of a packet
7076 * @crc: Ethernet CRC for selected bytes
7077 * @enable: enable/disable switch
7079 * Sets the pattern filters indicated in @map to mask out the bytes
7080 * specified in @mask0/@mask1 in received packets and compare the CRC of
7081 * the resulting packet against @crc. If @enable is %true pattern-based
7082 * WoL is enabled, otherwise disabled.
7084 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
7085 u64 mask0, u64 mask1, unsigned int crc, bool enable)
7091 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
7093 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
7096 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
7102 #define EPIO_REG(name) \
7103 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
7104 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
7106 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
7107 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
7108 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
7110 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
7114 /* write byte masks */
7115 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
7116 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
7117 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
7118 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7122 t4_write_reg(adap, EPIO_REG(DATA0), crc);
7123 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
7124 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
7125 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7130 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
7134 /* t4_mk_filtdelwr - create a delete filter WR
7135 * @ftid: the filter ID
7136 * @wr: the filter work request to populate
7137 * @qid: ingress queue to receive the delete notification
7139 * Creates a filter work request to delete the supplied filter. If @qid is
7140 * negative the delete notification is suppressed.
7142 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
7144 memset(wr, 0, sizeof(*wr));
7145 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
7146 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
7147 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
7148 V_FW_FILTER_WR_NOREPLY(qid < 0));
7149 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
7151 wr->rx_chan_rx_rpl_iq =
7152 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
7155 #define INIT_CMD(var, cmd, rd_wr) do { \
7156 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
7157 F_FW_CMD_REQUEST | \
7158 F_FW_CMD_##rd_wr); \
7159 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
7162 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
7166 struct fw_ldst_cmd c;
7168 memset(&c, 0, sizeof(c));
7169 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
7170 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7174 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7175 c.u.addrval.addr = cpu_to_be32(addr);
7176 c.u.addrval.val = cpu_to_be32(val);
7178 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7182 * t4_mdio_rd - read a PHY register through MDIO
7183 * @adap: the adapter
7184 * @mbox: mailbox to use for the FW command
7185 * @phy_addr: the PHY address
7186 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7187 * @reg: the register to read
7188 * @valp: where to store the value
7190 * Issues a FW command through the given mailbox to read a PHY register.
7192 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7193 unsigned int mmd, unsigned int reg, unsigned int *valp)
7197 struct fw_ldst_cmd c;
7199 memset(&c, 0, sizeof(c));
7200 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7201 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7202 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7204 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7205 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7206 V_FW_LDST_CMD_MMD(mmd));
7207 c.u.mdio.raddr = cpu_to_be16(reg);
7209 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7211 *valp = be16_to_cpu(c.u.mdio.rval);
7216 * t4_mdio_wr - write a PHY register through MDIO
7217 * @adap: the adapter
7218 * @mbox: mailbox to use for the FW command
7219 * @phy_addr: the PHY address
7220 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7221 * @reg: the register to write
7222 * @valp: value to write
7224 * Issues a FW command through the given mailbox to write a PHY register.
7226 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7227 unsigned int mmd, unsigned int reg, unsigned int val)
7230 struct fw_ldst_cmd c;
7232 memset(&c, 0, sizeof(c));
7233 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7234 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7235 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7237 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7238 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7239 V_FW_LDST_CMD_MMD(mmd));
7240 c.u.mdio.raddr = cpu_to_be16(reg);
7241 c.u.mdio.rval = cpu_to_be16(val);
7243 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7248 * t4_sge_decode_idma_state - decode the idma state
7249 * @adap: the adapter
7250 * @state: the state idma is stuck in
7252 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7254 static const char * const t4_decode[] = {
7256 "IDMA_PUSH_MORE_CPL_FIFO",
7257 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7259 "IDMA_PHYSADDR_SEND_PCIEHDR",
7260 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7261 "IDMA_PHYSADDR_SEND_PAYLOAD",
7262 "IDMA_SEND_FIFO_TO_IMSG",
7263 "IDMA_FL_REQ_DATA_FL_PREP",
7264 "IDMA_FL_REQ_DATA_FL",
7266 "IDMA_FL_H_REQ_HEADER_FL",
7267 "IDMA_FL_H_SEND_PCIEHDR",
7268 "IDMA_FL_H_PUSH_CPL_FIFO",
7269 "IDMA_FL_H_SEND_CPL",
7270 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7271 "IDMA_FL_H_SEND_IP_HDR",
7272 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7273 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7274 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7275 "IDMA_FL_D_SEND_PCIEHDR",
7276 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7277 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7278 "IDMA_FL_SEND_PCIEHDR",
7279 "IDMA_FL_PUSH_CPL_FIFO",
7281 "IDMA_FL_SEND_PAYLOAD_FIRST",
7282 "IDMA_FL_SEND_PAYLOAD",
7283 "IDMA_FL_REQ_NEXT_DATA_FL",
7284 "IDMA_FL_SEND_NEXT_PCIEHDR",
7285 "IDMA_FL_SEND_PADDING",
7286 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7287 "IDMA_FL_SEND_FIFO_TO_IMSG",
7288 "IDMA_FL_REQ_DATAFL_DONE",
7289 "IDMA_FL_REQ_HEADERFL_DONE",
7291 static const char * const t5_decode[] = {
7294 "IDMA_PUSH_MORE_CPL_FIFO",
7295 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7296 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7297 "IDMA_PHYSADDR_SEND_PCIEHDR",
7298 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7299 "IDMA_PHYSADDR_SEND_PAYLOAD",
7300 "IDMA_SEND_FIFO_TO_IMSG",
7301 "IDMA_FL_REQ_DATA_FL",
7303 "IDMA_FL_DROP_SEND_INC",
7304 "IDMA_FL_H_REQ_HEADER_FL",
7305 "IDMA_FL_H_SEND_PCIEHDR",
7306 "IDMA_FL_H_PUSH_CPL_FIFO",
7307 "IDMA_FL_H_SEND_CPL",
7308 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7309 "IDMA_FL_H_SEND_IP_HDR",
7310 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7311 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7312 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7313 "IDMA_FL_D_SEND_PCIEHDR",
7314 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7315 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7316 "IDMA_FL_SEND_PCIEHDR",
7317 "IDMA_FL_PUSH_CPL_FIFO",
7319 "IDMA_FL_SEND_PAYLOAD_FIRST",
7320 "IDMA_FL_SEND_PAYLOAD",
7321 "IDMA_FL_REQ_NEXT_DATA_FL",
7322 "IDMA_FL_SEND_NEXT_PCIEHDR",
7323 "IDMA_FL_SEND_PADDING",
7324 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7326 static const char * const t6_decode[] = {
7328 "IDMA_PUSH_MORE_CPL_FIFO",
7329 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7330 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7331 "IDMA_PHYSADDR_SEND_PCIEHDR",
7332 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7333 "IDMA_PHYSADDR_SEND_PAYLOAD",
7334 "IDMA_FL_REQ_DATA_FL",
7336 "IDMA_FL_DROP_SEND_INC",
7337 "IDMA_FL_H_REQ_HEADER_FL",
7338 "IDMA_FL_H_SEND_PCIEHDR",
7339 "IDMA_FL_H_PUSH_CPL_FIFO",
7340 "IDMA_FL_H_SEND_CPL",
7341 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7342 "IDMA_FL_H_SEND_IP_HDR",
7343 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7344 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7345 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7346 "IDMA_FL_D_SEND_PCIEHDR",
7347 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7348 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7349 "IDMA_FL_SEND_PCIEHDR",
7350 "IDMA_FL_PUSH_CPL_FIFO",
7352 "IDMA_FL_SEND_PAYLOAD_FIRST",
7353 "IDMA_FL_SEND_PAYLOAD",
7354 "IDMA_FL_REQ_NEXT_DATA_FL",
7355 "IDMA_FL_SEND_NEXT_PCIEHDR",
7356 "IDMA_FL_SEND_PADDING",
7357 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7359 static const u32 sge_regs[] = {
7360 A_SGE_DEBUG_DATA_LOW_INDEX_2,
7361 A_SGE_DEBUG_DATA_LOW_INDEX_3,
7362 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7364 const char * const *sge_idma_decode;
7365 int sge_idma_decode_nstates;
7367 unsigned int chip_version = chip_id(adapter);
7369 /* Select the right set of decode strings to dump depending on the
7370 * adapter chip type.
7372 switch (chip_version) {
7374 sge_idma_decode = (const char * const *)t4_decode;
7375 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7379 sge_idma_decode = (const char * const *)t5_decode;
7380 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7384 sge_idma_decode = (const char * const *)t6_decode;
7385 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7389 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
7393 if (state < sge_idma_decode_nstates)
7394 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7396 CH_WARN(adapter, "idma state %d unknown\n", state);
7398 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7399 CH_WARN(adapter, "SGE register %#x value %#x\n",
7400 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7404 * t4_sge_ctxt_flush - flush the SGE context cache
7405 * @adap: the adapter
7406 * @mbox: mailbox to use for the FW command
7408 * Issues a FW command through the given mailbox to flush the
7409 * SGE context cache.
7411 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
7415 struct fw_ldst_cmd c;
7417 memset(&c, 0, sizeof(c));
7418 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
7419 FW_LDST_ADDRSPC_SGE_EGRC :
7420 FW_LDST_ADDRSPC_SGE_INGC);
7421 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7422 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7424 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7425 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7427 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7432 * t4_fw_hello - establish communication with FW
7433 * @adap: the adapter
7434 * @mbox: mailbox to use for the FW command
7435 * @evt_mbox: mailbox to receive async FW events
7436 * @master: specifies the caller's willingness to be the device master
7437 * @state: returns the current device state (if non-NULL)
7439 * Issues a command to establish communication with FW. Returns either
7440 * an error (negative integer) or the mailbox of the Master PF.
7442 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7443 enum dev_master master, enum dev_state *state)
7446 struct fw_hello_cmd c;
7448 unsigned int master_mbox;
7449 int retries = FW_CMD_HELLO_RETRIES;
7452 memset(&c, 0, sizeof(c));
7453 INIT_CMD(c, HELLO, WRITE);
7454 c.err_to_clearinit = cpu_to_be32(
7455 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7456 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7457 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7458 mbox : M_FW_HELLO_CMD_MBMASTER) |
7459 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7460 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7461 F_FW_HELLO_CMD_CLEARINIT);
7464 * Issue the HELLO command to the firmware. If it's not successful
7465 * but indicates that we got a "busy" or "timeout" condition, retry
7466 * the HELLO until we exhaust our retry limit. If we do exceed our
7467 * retry limit, check to see if the firmware left us any error
7468 * information and report that if so ...
7470 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7471 if (ret != FW_SUCCESS) {
7472 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7474 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7475 t4_report_fw_error(adap);
7479 v = be32_to_cpu(c.err_to_clearinit);
7480 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7482 if (v & F_FW_HELLO_CMD_ERR)
7483 *state = DEV_STATE_ERR;
7484 else if (v & F_FW_HELLO_CMD_INIT)
7485 *state = DEV_STATE_INIT;
7487 *state = DEV_STATE_UNINIT;
7491 * If we're not the Master PF then we need to wait around for the
7492 * Master PF Driver to finish setting up the adapter.
7494 * Note that we also do this wait if we're a non-Master-capable PF and
7495 * there is no current Master PF; a Master PF may show up momentarily
7496 * and we wouldn't want to fail pointlessly. (This can happen when an
7497 * OS loads lots of different drivers rapidly at the same time). In
7498 * this case, the Master PF returned by the firmware will be
7499 * M_PCIE_FW_MASTER so the test below will work ...
7501 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7502 master_mbox != mbox) {
7503 int waiting = FW_CMD_HELLO_TIMEOUT;
7506 * Wait for the firmware to either indicate an error or
7507 * initialized state. If we see either of these we bail out
7508 * and report the issue to the caller. If we exhaust the
7509 * "hello timeout" and we haven't exhausted our retries, try
7510 * again. Otherwise bail with a timeout error.
7519 * If neither Error nor Initialialized are indicated
7520 * by the firmware keep waiting till we exhaust our
7521 * timeout ... and then retry if we haven't exhausted
7524 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7525 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7536 * We either have an Error or Initialized condition
7537 * report errors preferentially.
7540 if (pcie_fw & F_PCIE_FW_ERR)
7541 *state = DEV_STATE_ERR;
7542 else if (pcie_fw & F_PCIE_FW_INIT)
7543 *state = DEV_STATE_INIT;
7547 * If we arrived before a Master PF was selected and
7548 * there's not a valid Master PF, grab its identity
7551 if (master_mbox == M_PCIE_FW_MASTER &&
7552 (pcie_fw & F_PCIE_FW_MASTER_VLD))
7553 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7562 * t4_fw_bye - end communication with FW
7563 * @adap: the adapter
7564 * @mbox: mailbox to use for the FW command
7566 * Issues a command to terminate communication with FW.
7568 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7570 struct fw_bye_cmd c;
7572 memset(&c, 0, sizeof(c));
7573 INIT_CMD(c, BYE, WRITE);
7574 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7578 * t4_fw_reset - issue a reset to FW
7579 * @adap: the adapter
7580 * @mbox: mailbox to use for the FW command
7581 * @reset: specifies the type of reset to perform
7583 * Issues a reset command of the specified type to FW.
7585 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7587 struct fw_reset_cmd c;
7589 memset(&c, 0, sizeof(c));
7590 INIT_CMD(c, RESET, WRITE);
7591 c.val = cpu_to_be32(reset);
7592 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7596 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7597 * @adap: the adapter
7598 * @mbox: mailbox to use for the FW RESET command (if desired)
7599 * @force: force uP into RESET even if FW RESET command fails
7601 * Issues a RESET command to firmware (if desired) with a HALT indication
7602 * and then puts the microprocessor into RESET state. The RESET command
7603 * will only be issued if a legitimate mailbox is provided (mbox <=
7604 * M_PCIE_FW_MASTER).
7606 * This is generally used in order for the host to safely manipulate the
7607 * adapter without fear of conflicting with whatever the firmware might
7608 * be doing. The only way out of this state is to RESTART the firmware
7611 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7616 * If a legitimate mailbox is provided, issue a RESET command
7617 * with a HALT indication.
7619 if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
7620 struct fw_reset_cmd c;
7622 memset(&c, 0, sizeof(c));
7623 INIT_CMD(c, RESET, WRITE);
7624 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7625 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7626 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7630 * Normally we won't complete the operation if the firmware RESET
7631 * command fails but if our caller insists we'll go ahead and put the
7632 * uP into RESET. This can be useful if the firmware is hung or even
7633 * missing ... We'll have to take the risk of putting the uP into
7634 * RESET without the cooperation of firmware in that case.
7636 * We also force the firmware's HALT flag to be on in case we bypassed
7637 * the firmware RESET command above or we're dealing with old firmware
7638 * which doesn't have the HALT capability. This will serve as a flag
7639 * for the incoming firmware to know that it's coming out of a HALT
7640 * rather than a RESET ... if it's new enough to understand that ...
7642 if (ret == 0 || force) {
7643 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7644 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7649 * And we always return the result of the firmware RESET command
7650 * even when we force the uP into RESET ...
7656 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7657 * @adap: the adapter
7659 * Restart firmware previously halted by t4_fw_halt(). On successful
7660 * return the previous PF Master remains as the new PF Master and there
7661 * is no need to issue a new HELLO command, etc.
7663 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
7667 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7668 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7669 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7679 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7680 * @adap: the adapter
7681 * @mbox: mailbox to use for the FW RESET command (if desired)
7682 * @fw_data: the firmware image to write
7684 * @force: force upgrade even if firmware doesn't cooperate
7686 * Perform all of the steps necessary for upgrading an adapter's
7687 * firmware image. Normally this requires the cooperation of the
7688 * existing firmware in order to halt all existing activities
7689 * but if an invalid mailbox token is passed in we skip that step
7690 * (though we'll still put the adapter microprocessor into RESET in
7693 * On successful return the new firmware will have been loaded and
7694 * the adapter will have been fully RESET losing all previous setup
7695 * state. On unsuccessful return the adapter may be completely hosed ...
7696 * positive errno indicates that the adapter is ~probably~ intact, a
7697 * negative errno indicates that things are looking bad ...
7699 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7700 const u8 *fw_data, unsigned int size, int force)
7702 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7703 unsigned int bootstrap =
7704 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7707 if (!t4_fw_matches_chip(adap, fw_hdr))
7711 ret = t4_fw_halt(adap, mbox, force);
7712 if (ret < 0 && !force)
7716 ret = t4_load_fw(adap, fw_data, size);
7717 if (ret < 0 || bootstrap)
7720 return t4_fw_restart(adap, mbox);
7724 * t4_fw_initialize - ask FW to initialize the device
7725 * @adap: the adapter
7726 * @mbox: mailbox to use for the FW command
7728 * Issues a command to FW to partially initialize the device. This
7729 * performs initialization that generally doesn't depend on user input.
7731 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7733 struct fw_initialize_cmd c;
7735 memset(&c, 0, sizeof(c));
7736 INIT_CMD(c, INITIALIZE, WRITE);
7737 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7741 * t4_query_params_rw - query FW or device parameters
7742 * @adap: the adapter
7743 * @mbox: mailbox to use for the FW command
7746 * @nparams: the number of parameters
7747 * @params: the parameter names
7748 * @val: the parameter values
7749 * @rw: Write and read flag
7751 * Reads the value of FW or device parameters. Up to 7 parameters can be
7754 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7755 unsigned int vf, unsigned int nparams, const u32 *params,
7759 struct fw_params_cmd c;
7760 __be32 *p = &c.param[0].mnem;
7765 memset(&c, 0, sizeof(c));
7766 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7767 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7768 V_FW_PARAMS_CMD_PFN(pf) |
7769 V_FW_PARAMS_CMD_VFN(vf));
7770 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7772 for (i = 0; i < nparams; i++) {
7773 *p++ = cpu_to_be32(*params++);
7775 *p = cpu_to_be32(*(val + i));
7779 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7781 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7782 *val++ = be32_to_cpu(*p);
7786 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7787 unsigned int vf, unsigned int nparams, const u32 *params,
7790 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
7794 * t4_set_params_timeout - sets FW or device parameters
7795 * @adap: the adapter
7796 * @mbox: mailbox to use for the FW command
7799 * @nparams: the number of parameters
7800 * @params: the parameter names
7801 * @val: the parameter values
7802 * @timeout: the timeout time
7804 * Sets the value of FW or device parameters. Up to 7 parameters can be
7805 * specified at once.
7807 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7808 unsigned int pf, unsigned int vf,
7809 unsigned int nparams, const u32 *params,
7810 const u32 *val, int timeout)
7812 struct fw_params_cmd c;
7813 __be32 *p = &c.param[0].mnem;
7818 memset(&c, 0, sizeof(c));
7819 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7820 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7821 V_FW_PARAMS_CMD_PFN(pf) |
7822 V_FW_PARAMS_CMD_VFN(vf));
7823 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7826 *p++ = cpu_to_be32(*params++);
7827 *p++ = cpu_to_be32(*val++);
7830 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7834 * t4_set_params - sets FW or device parameters
7835 * @adap: the adapter
7836 * @mbox: mailbox to use for the FW command
7839 * @nparams: the number of parameters
7840 * @params: the parameter names
7841 * @val: the parameter values
7843 * Sets the value of FW or device parameters. Up to 7 parameters can be
7844 * specified at once.
7846 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7847 unsigned int vf, unsigned int nparams, const u32 *params,
7850 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7851 FW_CMD_MAX_TIMEOUT);
7855 * t4_cfg_pfvf - configure PF/VF resource limits
7856 * @adap: the adapter
7857 * @mbox: mailbox to use for the FW command
7858 * @pf: the PF being configured
7859 * @vf: the VF being configured
7860 * @txq: the max number of egress queues
7861 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7862 * @rxqi: the max number of interrupt-capable ingress queues
7863 * @rxq: the max number of interruptless ingress queues
7864 * @tc: the PCI traffic class
7865 * @vi: the max number of virtual interfaces
7866 * @cmask: the channel access rights mask for the PF/VF
7867 * @pmask: the port access rights mask for the PF/VF
7868 * @nexact: the maximum number of exact MPS filters
7869 * @rcaps: read capabilities
7870 * @wxcaps: write/execute capabilities
7872 * Configures resource limits and capabilities for a physical or virtual
7875 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7876 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7877 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7878 unsigned int vi, unsigned int cmask, unsigned int pmask,
7879 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7881 struct fw_pfvf_cmd c;
7883 memset(&c, 0, sizeof(c));
7884 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7885 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7886 V_FW_PFVF_CMD_VFN(vf));
7887 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7888 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7889 V_FW_PFVF_CMD_NIQ(rxq));
7890 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7891 V_FW_PFVF_CMD_PMASK(pmask) |
7892 V_FW_PFVF_CMD_NEQ(txq));
7893 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7894 V_FW_PFVF_CMD_NVI(vi) |
7895 V_FW_PFVF_CMD_NEXACTF(nexact));
7896 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7897 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7898 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7899 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7903 * t4_alloc_vi_func - allocate a virtual interface
7904 * @adap: the adapter
7905 * @mbox: mailbox to use for the FW command
7906 * @port: physical port associated with the VI
7907 * @pf: the PF owning the VI
7908 * @vf: the VF owning the VI
7909 * @nmac: number of MAC addresses needed (1 to 5)
7910 * @mac: the MAC addresses of the VI
7911 * @rss_size: size of RSS table slice associated with this VI
7912 * @portfunc: which Port Application Function MAC Address is desired
7913 * @idstype: Intrusion Detection Type
7915 * Allocates a virtual interface for the given physical port. If @mac is
7916 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7917 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7918 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7919 * stored consecutively so the space needed is @nmac * 6 bytes.
7920 * Returns a negative error number or the non-negative VI id.
7922 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7923 unsigned int port, unsigned int pf, unsigned int vf,
7924 unsigned int nmac, u8 *mac, u16 *rss_size,
7925 uint8_t *vfvld, uint16_t *vin,
7926 unsigned int portfunc, unsigned int idstype)
7931 memset(&c, 0, sizeof(c));
7932 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7933 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7934 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7935 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7936 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7937 V_FW_VI_CMD_FUNC(portfunc));
7938 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7941 c.norss_rsssize = F_FW_VI_CMD_NORSS;
7943 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7946 ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7949 memcpy(mac, c.mac, sizeof(c.mac));
7952 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7954 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7956 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7958 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7962 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7964 *vfvld = adap->params.viid_smt_extn_support ?
7965 G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
7966 G_FW_VIID_VIVLD(ret);
7969 *vin = adap->params.viid_smt_extn_support ?
7970 G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
7978 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7979 * @adap: the adapter
7980 * @mbox: mailbox to use for the FW command
7981 * @port: physical port associated with the VI
7982 * @pf: the PF owning the VI
7983 * @vf: the VF owning the VI
7984 * @nmac: number of MAC addresses needed (1 to 5)
7985 * @mac: the MAC addresses of the VI
7986 * @rss_size: size of RSS table slice associated with this VI
7988 * backwards compatible and convieniance routine to allocate a Virtual
7989 * Interface with a Ethernet Port Application Function and Intrustion
7990 * Detection System disabled.
7992 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7993 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7994 u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
7996 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7997 vfvld, vin, FW_VI_FUNC_ETH, 0);
8001 * t4_free_vi - free a virtual interface
8002 * @adap: the adapter
8003 * @mbox: mailbox to use for the FW command
8004 * @pf: the PF owning the VI
8005 * @vf: the VF owning the VI
8006 * @viid: virtual interface identifiler
8008 * Free a previously allocated virtual interface.
8010 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8011 unsigned int vf, unsigned int viid)
8015 memset(&c, 0, sizeof(c));
8016 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8019 V_FW_VI_CMD_PFN(pf) |
8020 V_FW_VI_CMD_VFN(vf));
8021 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8022 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8024 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8028 * t4_set_rxmode - set Rx properties of a virtual interface
8029 * @adap: the adapter
8030 * @mbox: mailbox to use for the FW command
8032 * @mtu: the new MTU or -1
8033 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8034 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8035 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8036 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8037 * @sleep_ok: if true we may sleep while awaiting command completion
8039 * Sets Rx properties of a virtual interface.
8041 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8042 int mtu, int promisc, int all_multi, int bcast, int vlanex,
8045 struct fw_vi_rxmode_cmd c;
8047 /* convert to FW values */
8049 mtu = M_FW_VI_RXMODE_CMD_MTU;
8051 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8053 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8055 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8057 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8059 memset(&c, 0, sizeof(c));
8060 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8061 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8062 V_FW_VI_RXMODE_CMD_VIID(viid));
8063 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8065 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8066 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8067 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8068 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8069 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8070 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8074 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
8075 * @adap: the adapter
8077 * @mac: the MAC address
8079 * @vni: the VNI id for the tunnel protocol
8080 * @vni_mask: mask for the VNI id
8081 * @dip_hit: to enable DIP match for the MPS entry
8082 * @lookup_type: MAC address for inner (1) or outer (0) header
8083 * @sleep_ok: call is allowed to sleep
8085 * Allocates an MPS entry with specified MAC address and VNI value.
8087 * Returns a negative error number or the allocated index for this mac.
8089 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
8090 const u8 *addr, const u8 *mask, unsigned int vni,
8091 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
8094 struct fw_vi_mac_cmd c;
8095 struct fw_vi_mac_vni *p = c.u.exact_vni;
8099 memset(&c, 0, sizeof(c));
8100 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8101 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8102 V_FW_VI_MAC_CMD_VIID(viid));
8103 val = V_FW_CMD_LEN16(1) |
8104 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
8105 c.freemacs_to_len16 = cpu_to_be32(val);
8106 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8107 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8108 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8109 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
8111 p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
8112 V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
8113 V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
8114 p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));
8116 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8118 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8123 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
8124 * @adap: the adapter
8126 * @mac: the MAC address
8128 * @idx: index at which to add this entry
8129 * @port_id: the port index
8130 * @lookup_type: MAC address for inner (1) or outer (0) header
8131 * @sleep_ok: call is allowed to sleep
8133 * Adds the mac entry at the specified index using raw mac interface.
8135 * Returns a negative error number or the allocated index for this mac.
8137 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
8138 const u8 *addr, const u8 *mask, unsigned int idx,
8139 u8 lookup_type, u8 port_id, bool sleep_ok)
8142 struct fw_vi_mac_cmd c;
8143 struct fw_vi_mac_raw *p = &c.u.raw;
8146 memset(&c, 0, sizeof(c));
8147 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8148 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8149 V_FW_VI_MAC_CMD_VIID(viid));
8150 val = V_FW_CMD_LEN16(1) |
8151 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8152 c.freemacs_to_len16 = cpu_to_be32(val);
8154 /* Specify that this is an inner mac address */
8155 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
8157 /* Lookup Type. Outer header: 0, Inner header: 1 */
8158 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
8159 V_DATAPORTNUM(port_id));
8160 /* Lookup mask and port mask */
8161 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
8162 V_DATAPORTNUM(M_DATAPORTNUM));
8164 /* Copy the address and the mask */
8165 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
8166 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
8168 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8170 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
8179 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
8180 * @adap: the adapter
8181 * @mbox: mailbox to use for the FW command
8183 * @free: if true any existing filters for this VI id are first removed
8184 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8185 * @addr: the MAC address(es)
8186 * @idx: where to store the index of each allocated filter
8187 * @hash: pointer to hash address filter bitmap
8188 * @sleep_ok: call is allowed to sleep
8190 * Allocates an exact-match filter for each of the supplied addresses and
8191 * sets it to the corresponding address. If @idx is not %NULL it should
8192 * have at least @naddr entries, each of which will be set to the index of
8193 * the filter allocated for the corresponding MAC address. If a filter
8194 * could not be allocated for an address its index is set to 0xffff.
8195 * If @hash is not %NULL addresses that fail to allocate an exact filter
8196 * are hashed and update the hash filter bitmap pointed at by @hash.
8198 * Returns a negative error number or the number of filters allocated.
8200 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8201 unsigned int viid, bool free, unsigned int naddr,
8202 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8204 int offset, ret = 0;
8205 struct fw_vi_mac_cmd c;
8206 unsigned int nfilters = 0;
8207 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
8208 unsigned int rem = naddr;
8210 if (naddr > max_naddr)
8213 for (offset = 0; offset < naddr ; /**/) {
8214 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8216 : ARRAY_SIZE(c.u.exact));
8217 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8218 u.exact[fw_naddr]), 16);
8219 struct fw_vi_mac_exact *p;
8222 memset(&c, 0, sizeof(c));
8223 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8226 V_FW_CMD_EXEC(free) |
8227 V_FW_VI_MAC_CMD_VIID(viid));
8228 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
8229 V_FW_CMD_LEN16(len16));
8231 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8233 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8234 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8235 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8239 * It's okay if we run out of space in our MAC address arena.
8240 * Some of the addresses we submit may get stored so we need
8241 * to run through the reply to see what the results were ...
8243 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8244 if (ret && ret != -FW_ENOMEM)
8247 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8248 u16 index = G_FW_VI_MAC_CMD_IDX(
8249 be16_to_cpu(p->valid_to_idx));
8252 idx[offset+i] = (index >= max_naddr
8255 if (index < max_naddr)
8258 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
8266 if (ret == 0 || ret == -FW_ENOMEM)
8272 * t4_free_encap_mac_filt - frees MPS entry at given index
8273 * @adap: the adapter
8275 * @idx: index of MPS entry to be freed
8276 * @sleep_ok: call is allowed to sleep
8278 * Frees the MPS entry at supplied index
8280 * Returns a negative error number or zero on success
8282 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
8283 int idx, bool sleep_ok)
8285 struct fw_vi_mac_exact *p;
8286 struct fw_vi_mac_cmd c;
8287 u8 addr[] = {0,0,0,0,0,0};
8291 memset(&c, 0, sizeof(c));
8292 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8296 V_FW_VI_MAC_CMD_VIID(viid));
8297 exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
8298 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8302 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8303 V_FW_VI_MAC_CMD_IDX(idx));
8304 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8306 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8311 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
8312 * @adap: the adapter
8314 * @addr: the MAC address
8316 * @idx: index of the entry in mps tcam
8317 * @lookup_type: MAC address for inner (1) or outer (0) header
8318 * @port_id: the port index
8319 * @sleep_ok: call is allowed to sleep
8321 * Removes the mac entry at the specified index using raw mac interface.
8323 * Returns a negative error number on failure.
8325 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
8326 const u8 *addr, const u8 *mask, unsigned int idx,
8327 u8 lookup_type, u8 port_id, bool sleep_ok)
8329 struct fw_vi_mac_cmd c;
8330 struct fw_vi_mac_raw *p = &c.u.raw;
8333 memset(&c, 0, sizeof(c));
8334 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8335 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8337 V_FW_VI_MAC_CMD_VIID(viid));
8338 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8339 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8343 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
8344 FW_VI_MAC_ID_BASED_FREE);
8346 /* Lookup Type. Outer header: 0, Inner header: 1 */
8347 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
8348 V_DATAPORTNUM(port_id));
8349 /* Lookup mask and port mask */
8350 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
8351 V_DATAPORTNUM(M_DATAPORTNUM));
8353 /* Copy the address and the mask */
8354 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
8355 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
8357 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8361 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8362 * @adap: the adapter
8363 * @mbox: mailbox to use for the FW command
8365 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8366 * @addr: the MAC address(es)
8367 * @sleep_ok: call is allowed to sleep
8369 * Frees the exact-match filter for each of the supplied addresses
8371 * Returns a negative error number or the number of filters freed.
8373 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8374 unsigned int viid, unsigned int naddr,
8375 const u8 **addr, bool sleep_ok)
8377 int offset, ret = 0;
8378 struct fw_vi_mac_cmd c;
8379 unsigned int nfilters = 0;
8380 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
8381 unsigned int rem = naddr;
8383 if (naddr > max_naddr)
8386 for (offset = 0; offset < (int)naddr ; /**/) {
8387 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8389 : ARRAY_SIZE(c.u.exact));
8390 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8391 u.exact[fw_naddr]), 16);
8392 struct fw_vi_mac_exact *p;
8395 memset(&c, 0, sizeof(c));
8396 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8400 V_FW_VI_MAC_CMD_VIID(viid));
8401 c.freemacs_to_len16 =
8402 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8403 V_FW_CMD_LEN16(len16));
8405 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8406 p->valid_to_idx = cpu_to_be16(
8407 F_FW_VI_MAC_CMD_VALID |
8408 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8409 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8412 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8416 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8417 u16 index = G_FW_VI_MAC_CMD_IDX(
8418 be16_to_cpu(p->valid_to_idx));
8420 if (index < max_naddr)
8434 * t4_change_mac - modifies the exact-match filter for a MAC address
8435 * @adap: the adapter
8436 * @mbox: mailbox to use for the FW command
8438 * @idx: index of existing filter for old value of MAC address, or -1
8439 * @addr: the new MAC address value
8440 * @persist: whether a new MAC allocation should be persistent
8441 * @smt_idx: add MAC to SMT and return its index, or NULL
8443 * Modifies an exact-match filter and sets it to the new MAC address if
8444 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
8445 * latter case the address is added persistently if @persist is %true.
8447 * Note that in general it is not possible to modify the value of a given
8448 * filter so the generic way to modify an address filter is to free the one
8449 * being used by the old address value and allocate a new filter for the
8450 * new address value.
8452 * Returns a negative error number or the index of the filter with the new
8453 * MAC value. Note that this index may differ from @idx.
8455 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8456 int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
8459 struct fw_vi_mac_cmd c;
8460 struct fw_vi_mac_exact *p = c.u.exact;
8461 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
8463 if (idx < 0) /* new allocation */
8464 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8465 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8467 memset(&c, 0, sizeof(c));
8468 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8469 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8470 V_FW_VI_MAC_CMD_VIID(viid));
8471 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
8472 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8473 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
8474 V_FW_VI_MAC_CMD_IDX(idx));
8475 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8477 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8479 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8480 if (ret >= max_mac_addr)
8483 if (adap->params.viid_smt_extn_support)
8484 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
8486 if (chip_id(adap) <= CHELSIO_T5)
8487 *smt_idx = (viid & M_FW_VIID_VIN) << 1;
8489 *smt_idx = viid & M_FW_VIID_VIN;
8497 * t4_set_addr_hash - program the MAC inexact-match hash filter
8498 * @adap: the adapter
8499 * @mbox: mailbox to use for the FW command
8501 * @ucast: whether the hash filter should also match unicast addresses
8502 * @vec: the value to be written to the hash filter
8503 * @sleep_ok: call is allowed to sleep
8505 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8507 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8508 bool ucast, u64 vec, bool sleep_ok)
8510 struct fw_vi_mac_cmd c;
8513 memset(&c, 0, sizeof(c));
8514 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8515 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8516 V_FW_VI_ENABLE_CMD_VIID(viid));
8517 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8518 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8519 c.freemacs_to_len16 = cpu_to_be32(val);
8520 c.u.hash.hashvec = cpu_to_be64(vec);
8521 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8525 * t4_enable_vi_params - enable/disable a virtual interface
8526 * @adap: the adapter
8527 * @mbox: mailbox to use for the FW command
8529 * @rx_en: 1=enable Rx, 0=disable Rx
8530 * @tx_en: 1=enable Tx, 0=disable Tx
8531 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8533 * Enables/disables a virtual interface. Note that setting DCB Enable
8534 * only makes sense when enabling a Virtual Interface ...
8536 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8537 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8539 struct fw_vi_enable_cmd c;
8541 memset(&c, 0, sizeof(c));
8542 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8543 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8544 V_FW_VI_ENABLE_CMD_VIID(viid));
8545 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8546 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8547 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8549 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8553 * t4_enable_vi - enable/disable a virtual interface
8554 * @adap: the adapter
8555 * @mbox: mailbox to use for the FW command
8557 * @rx_en: 1=enable Rx, 0=disable Rx
8558 * @tx_en: 1=enable Tx, 0=disable Tx
8560 * Enables/disables a virtual interface. Note that setting DCB Enable
8561 * only makes sense when enabling a Virtual Interface ...
8563 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8564 bool rx_en, bool tx_en)
8566 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8570 * t4_identify_port - identify a VI's port by blinking its LED
8571 * @adap: the adapter
8572 * @mbox: mailbox to use for the FW command
8574 * @nblinks: how many times to blink LED at 2.5 Hz
8576 * Identifies a VI's port by blinking its LED.
8578 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8579 unsigned int nblinks)
8581 struct fw_vi_enable_cmd c;
8583 memset(&c, 0, sizeof(c));
8584 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8585 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8586 V_FW_VI_ENABLE_CMD_VIID(viid));
8587 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8588 c.blinkdur = cpu_to_be16(nblinks);
8589 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8593 * t4_iq_stop - stop an ingress queue and its FLs
8594 * @adap: the adapter
8595 * @mbox: mailbox to use for the FW command
8596 * @pf: the PF owning the queues
8597 * @vf: the VF owning the queues
8598 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8599 * @iqid: ingress queue id
8600 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8601 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8603 * Stops an ingress queue and its associated FLs, if any. This causes
8604 * any current or future data/messages destined for these queues to be
8607 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8608 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8609 unsigned int fl0id, unsigned int fl1id)
8613 memset(&c, 0, sizeof(c));
8614 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8615 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8616 V_FW_IQ_CMD_VFN(vf));
8617 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8618 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8619 c.iqid = cpu_to_be16(iqid);
8620 c.fl0id = cpu_to_be16(fl0id);
8621 c.fl1id = cpu_to_be16(fl1id);
8622 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8626 * t4_iq_free - free an ingress queue and its FLs
8627 * @adap: the adapter
8628 * @mbox: mailbox to use for the FW command
8629 * @pf: the PF owning the queues
8630 * @vf: the VF owning the queues
8631 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8632 * @iqid: ingress queue id
8633 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8634 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8636 * Frees an ingress queue and its associated FLs, if any.
8638 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8639 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8640 unsigned int fl0id, unsigned int fl1id)
8644 memset(&c, 0, sizeof(c));
8645 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8646 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8647 V_FW_IQ_CMD_VFN(vf));
8648 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8649 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8650 c.iqid = cpu_to_be16(iqid);
8651 c.fl0id = cpu_to_be16(fl0id);
8652 c.fl1id = cpu_to_be16(fl1id);
8653 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8657 * t4_eth_eq_stop - stop an Ethernet egress queue
8658 * @adap: the adapter
8659 * @mbox: mailbox to use for the FW command
8660 * @pf: the PF owning the queues
8661 * @vf: the VF owning the queues
8662 * @eqid: egress queue id
8664 * Stops an Ethernet egress queue. The queue can be reinitialized or
8665 * freed but is not otherwise functional after this call.
8667 int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8668 unsigned int vf, unsigned int eqid)
8670 struct fw_eq_eth_cmd c;
8672 memset(&c, 0, sizeof(c));
8673 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8674 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8675 V_FW_EQ_ETH_CMD_PFN(pf) |
8676 V_FW_EQ_ETH_CMD_VFN(vf));
8677 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c));
8678 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8679 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8683 * t4_eth_eq_free - free an Ethernet egress queue
8684 * @adap: the adapter
8685 * @mbox: mailbox to use for the FW command
8686 * @pf: the PF owning the queue
8687 * @vf: the VF owning the queue
8688 * @eqid: egress queue id
8690 * Frees an Ethernet egress queue.
8692 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8693 unsigned int vf, unsigned int eqid)
8695 struct fw_eq_eth_cmd c;
8697 memset(&c, 0, sizeof(c));
8698 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8699 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8700 V_FW_EQ_ETH_CMD_PFN(pf) |
8701 V_FW_EQ_ETH_CMD_VFN(vf));
8702 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8703 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8704 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8708 * t4_ctrl_eq_free - free a control egress queue
8709 * @adap: the adapter
8710 * @mbox: mailbox to use for the FW command
8711 * @pf: the PF owning the queue
8712 * @vf: the VF owning the queue
8713 * @eqid: egress queue id
8715 * Frees a control egress queue.
8717 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8718 unsigned int vf, unsigned int eqid)
8720 struct fw_eq_ctrl_cmd c;
8722 memset(&c, 0, sizeof(c));
8723 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8724 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8725 V_FW_EQ_CTRL_CMD_PFN(pf) |
8726 V_FW_EQ_CTRL_CMD_VFN(vf));
8727 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8728 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8729 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8733 * t4_ofld_eq_free - free an offload egress queue
8734 * @adap: the adapter
8735 * @mbox: mailbox to use for the FW command
8736 * @pf: the PF owning the queue
8737 * @vf: the VF owning the queue
8738 * @eqid: egress queue id
8740 * Frees a control egress queue.
8742 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8743 unsigned int vf, unsigned int eqid)
8745 struct fw_eq_ofld_cmd c;
8747 memset(&c, 0, sizeof(c));
8748 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
8749 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8750 V_FW_EQ_OFLD_CMD_PFN(pf) |
8751 V_FW_EQ_OFLD_CMD_VFN(vf));
8752 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
8753 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
8754 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8758 * t4_link_down_rc_str - return a string for a Link Down Reason Code
8759 * @link_down_rc: Link Down Reason Code
8761 * Returns a string representation of the Link Down Reason Code.
8763 const char *t4_link_down_rc_str(unsigned char link_down_rc)
8765 static const char *reason[] = {
8768 "Auto-negotiation Failure",
8770 "Insufficient Airflow",
8771 "Unable To Determine Reason",
8772 "No RX Signal Detected",
8776 if (link_down_rc >= ARRAY_SIZE(reason))
8777 return "Bad Reason Code";
8779 return reason[link_down_rc];
8783 * Return the highest speed set in the port capabilities, in Mb/s.
8785 unsigned int fwcap_to_speed(uint32_t caps)
8787 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8789 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8793 TEST_SPEED_RETURN(400G, 400000);
8794 TEST_SPEED_RETURN(200G, 200000);
8795 TEST_SPEED_RETURN(100G, 100000);
8796 TEST_SPEED_RETURN(50G, 50000);
8797 TEST_SPEED_RETURN(40G, 40000);
8798 TEST_SPEED_RETURN(25G, 25000);
8799 TEST_SPEED_RETURN(10G, 10000);
8800 TEST_SPEED_RETURN(1G, 1000);
8801 TEST_SPEED_RETURN(100M, 100);
8803 #undef TEST_SPEED_RETURN
8809 * Return the port capabilities bit for the given speed, which is in Mb/s.
8811 uint32_t speed_to_fwcap(unsigned int speed)
8813 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8815 if (speed == __speed) \
8816 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8819 TEST_SPEED_RETURN(400G, 400000);
8820 TEST_SPEED_RETURN(200G, 200000);
8821 TEST_SPEED_RETURN(100G, 100000);
8822 TEST_SPEED_RETURN(50G, 50000);
8823 TEST_SPEED_RETURN(40G, 40000);
8824 TEST_SPEED_RETURN(25G, 25000);
8825 TEST_SPEED_RETURN(10G, 10000);
8826 TEST_SPEED_RETURN(1G, 1000);
8827 TEST_SPEED_RETURN(100M, 100);
8829 #undef TEST_SPEED_RETURN
8835 * Return the port capabilities bit for the highest speed in the capabilities.
8837 uint32_t fwcap_top_speed(uint32_t caps)
8839 #define TEST_SPEED_RETURN(__caps_speed) \
8841 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8842 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8845 TEST_SPEED_RETURN(400G);
8846 TEST_SPEED_RETURN(200G);
8847 TEST_SPEED_RETURN(100G);
8848 TEST_SPEED_RETURN(50G);
8849 TEST_SPEED_RETURN(40G);
8850 TEST_SPEED_RETURN(25G);
8851 TEST_SPEED_RETURN(10G);
8852 TEST_SPEED_RETURN(1G);
8853 TEST_SPEED_RETURN(100M);
8855 #undef TEST_SPEED_RETURN
8861 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8862 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8864 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8865 * 32-bit Port Capabilities value.
8867 static uint32_t lstatus_to_fwcap(u32 lstatus)
8869 uint32_t linkattr = 0;
8872 * Unfortunately the format of the Link Status in the old
8873 * 16-bit Port Information message isn't the same as the
8874 * 16-bit Port Capabilities bitfield used everywhere else ...
8876 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
8877 linkattr |= FW_PORT_CAP32_FC_RX;
8878 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
8879 linkattr |= FW_PORT_CAP32_FC_TX;
8880 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
8881 linkattr |= FW_PORT_CAP32_SPEED_100M;
8882 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
8883 linkattr |= FW_PORT_CAP32_SPEED_1G;
8884 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
8885 linkattr |= FW_PORT_CAP32_SPEED_10G;
8886 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
8887 linkattr |= FW_PORT_CAP32_SPEED_25G;
8888 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
8889 linkattr |= FW_PORT_CAP32_SPEED_40G;
8890 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
8891 linkattr |= FW_PORT_CAP32_SPEED_100G;
8897 * Updates all fields owned by the common code in port_info and link_config
8898 * based on information provided by the firmware. Does not touch any
8899 * requested_* field.
8901 static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
8902 enum fw_port_action action, bool *mod_changed, bool *link_changed)
8904 struct link_config old_lc, *lc = &pi->link_cfg;
8907 int old_ptype, old_mtype;
8909 old_ptype = pi->port_type;
8910 old_mtype = pi->mod_type;
8912 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8913 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
8915 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
8916 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
8917 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
8918 G_FW_PORT_CMD_MDIOADDR(stat) : -1;
8920 lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
8921 lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
8922 lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
8923 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
8924 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
8926 linkattr = lstatus_to_fwcap(stat);
8927 } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
8928 stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
8930 pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
8931 pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
8932 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
8933 G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
8935 lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
8936 lc->acaps = be32_to_cpu(p->u.info32.acaps32);
8937 lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
8938 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
8939 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
8941 linkattr = be32_to_cpu(p->u.info32.linkattr32);
8943 CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
8947 lc->speed = fwcap_to_speed(linkattr);
8948 lc->fec = fwcap_to_fec(linkattr, true);
8951 if (linkattr & FW_PORT_CAP32_FC_RX)
8953 if (linkattr & FW_PORT_CAP32_FC_TX)
8957 if (mod_changed != NULL)
8958 *mod_changed = false;
8959 if (link_changed != NULL)
8960 *link_changed = false;
8961 if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
8962 old_lc.pcaps != lc->pcaps) {
8963 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
8964 lc->fec_hint = fwcap_to_fec(lc->acaps, true);
8965 if (mod_changed != NULL)
8966 *mod_changed = true;
8968 if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
8969 old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
8970 if (link_changed != NULL)
8971 *link_changed = true;
8976 * t4_update_port_info - retrieve and update port information if changed
8977 * @pi: the port_info
8979 * We issue a Get Port Information Command to the Firmware and, if
8980 * successful, we check to see if anything is different from what we
8981 * last recorded and update things accordingly.
8983 int t4_update_port_info(struct port_info *pi)
8985 struct adapter *sc = pi->adapter;
8986 struct fw_port_cmd cmd;
8987 enum fw_port_action action;
8990 memset(&cmd, 0, sizeof(cmd));
8991 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8992 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8993 V_FW_PORT_CMD_PORTID(pi->tx_chan));
8994 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
8995 FW_PORT_ACTION_GET_PORT_INFO;
8996 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
8998 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
9002 handle_port_info(pi, &cmd, action, NULL, NULL);
9007 * t4_handle_fw_rpl - process a FW reply message
9008 * @adap: the adapter
9009 * @rpl: start of the FW message
9011 * Processes a FW message, such as link state change messages.
9013 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
9015 u8 opcode = *(const u8 *)rpl;
9016 const struct fw_port_cmd *p = (const void *)rpl;
9017 enum fw_port_action action =
9018 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
9019 bool mod_changed, link_changed;
9021 if (opcode == FW_PORT_CMD &&
9022 (action == FW_PORT_ACTION_GET_PORT_INFO ||
9023 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
9024 /* link/module state change message */
9026 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
9027 struct port_info *pi = NULL;
9028 struct link_config *lc;
9030 for_each_port(adap, i) {
9031 pi = adap2pinfo(adap, i);
9032 if (pi->tx_chan == chan)
9038 handle_port_info(pi, p, action, &mod_changed, &link_changed);
9041 t4_os_portmod_changed(pi);
9044 t4_os_link_changed(pi);
9048 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
9055 * get_pci_mode - determine a card's PCI mode
9056 * @adapter: the adapter
9057 * @p: where to store the PCI settings
9059 * Determines a card's PCI mode and associated parameters, such as speed
9062 static void get_pci_mode(struct adapter *adapter,
9063 struct pci_params *p)
9068 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9070 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9071 p->speed = val & PCI_EXP_LNKSTA_CLS;
9072 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9077 u32 vendor_and_model_id;
9081 int t4_get_flash_params(struct adapter *adapter)
9084 * Table for non-standard supported Flash parts. Note, all Flash
9085 * parts must have 64KB sectors.
9087 static struct flash_desc supported_flash[] = {
9088 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
9093 unsigned int part, manufacturer;
9094 unsigned int density, size = 0;
9098 * Issue a Read ID Command to the Flash part. We decode supported
9099 * Flash parts and their sizes from this. There's a newer Query
9100 * Command which can retrieve detailed geometry information but many
9101 * Flash parts don't support it.
9103 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
9105 ret = sf1_read(adapter, 3, 0, 1, &flashid);
9106 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
9111 * Check to see if it's one of our non-standard supported Flash parts.
9113 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
9114 if (supported_flash[part].vendor_and_model_id == flashid) {
9115 adapter->params.sf_size =
9116 supported_flash[part].size_mb;
9117 adapter->params.sf_nsec =
9118 adapter->params.sf_size / SF_SEC_SIZE;
9123 * Decode Flash part size. The code below looks repetative with
9124 * common encodings, but that's not guaranteed in the JEDEC
9125 * specification for the Read JADEC ID command. The only thing that
9126 * we're guaranteed by the JADEC specification is where the
9127 * Manufacturer ID is in the returned result. After that each
9128 * Manufacturer ~could~ encode things completely differently.
9129 * Note, all Flash parts must have 64KB sectors.
9131 manufacturer = flashid & 0xff;
9132 switch (manufacturer) {
9133 case 0x20: /* Micron/Numonix */
9135 * This Density -> Size decoding table is taken from Micron
9138 density = (flashid >> 16) & 0xff;
9140 case 0x14: size = 1 << 20; break; /* 1MB */
9141 case 0x15: size = 1 << 21; break; /* 2MB */
9142 case 0x16: size = 1 << 22; break; /* 4MB */
9143 case 0x17: size = 1 << 23; break; /* 8MB */
9144 case 0x18: size = 1 << 24; break; /* 16MB */
9145 case 0x19: size = 1 << 25; break; /* 32MB */
9146 case 0x20: size = 1 << 26; break; /* 64MB */
9147 case 0x21: size = 1 << 27; break; /* 128MB */
9148 case 0x22: size = 1 << 28; break; /* 256MB */
9152 case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
9154 * This Density -> Size decoding table is taken from ISSI
9157 density = (flashid >> 16) & 0xff;
9159 case 0x16: size = 1 << 25; break; /* 32MB */
9160 case 0x17: size = 1 << 26; break; /* 64MB */
9164 case 0xc2: /* Macronix */
9166 * This Density -> Size decoding table is taken from Macronix
9169 density = (flashid >> 16) & 0xff;
9171 case 0x17: size = 1 << 23; break; /* 8MB */
9172 case 0x18: size = 1 << 24; break; /* 16MB */
9176 case 0xef: /* Winbond */
9178 * This Density -> Size decoding table is taken from Winbond
9181 density = (flashid >> 16) & 0xff;
9183 case 0x17: size = 1 << 23; break; /* 8MB */
9184 case 0x18: size = 1 << 24; break; /* 16MB */
9189 /* If we didn't recognize the FLASH part, that's no real issue: the
9190 * Hardware/Software contract says that Hardware will _*ALWAYS*_
9191 * use a FLASH part which is at least 4MB in size and has 64KB
9192 * sectors. The unrecognized FLASH part is likely to be much larger
9193 * than 4MB, but that's all we really need.
9196 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
9201 * Store decoded Flash size and fall through into vetting code.
9203 adapter->params.sf_size = size;
9204 adapter->params.sf_nsec = size / SF_SEC_SIZE;
9208 * We should ~probably~ reject adapters with FLASHes which are too
9209 * small but we have some legacy FPGAs with small FLASHes that we'd
9210 * still like to use. So instead we emit a scary message ...
9212 if (adapter->params.sf_size < FLASH_MIN_SIZE)
9213 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
9214 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
9219 static void set_pcie_completion_timeout(struct adapter *adapter,
9225 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9227 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
9230 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
9234 const struct chip_params *t4_get_chip_params(int chipid)
9236 static const struct chip_params chip_params[] = {
9240 .pm_stats_cnt = PM_NSTATS,
9241 .cng_ch_bits_log = 2,
9243 .cim_num_obq = CIM_NUM_OBQ,
9244 .mps_rplc_size = 128,
9246 .sge_fl_db = F_DBPRIO,
9247 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
9248 .rss_nentries = RSS_NENTRIES,
9253 .pm_stats_cnt = PM_NSTATS,
9254 .cng_ch_bits_log = 2,
9256 .cim_num_obq = CIM_NUM_OBQ_T5,
9257 .mps_rplc_size = 128,
9259 .sge_fl_db = F_DBPRIO | F_DBTYPE,
9260 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
9261 .rss_nentries = RSS_NENTRIES,
9266 .pm_stats_cnt = T6_PM_NSTATS,
9267 .cng_ch_bits_log = 3,
9269 .cim_num_obq = CIM_NUM_OBQ_T5,
9270 .mps_rplc_size = 256,
9273 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
9274 .rss_nentries = T6_RSS_NENTRIES,
9278 chipid -= CHELSIO_T4;
9279 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
9282 return &chip_params[chipid];
9286 * t4_prep_adapter - prepare SW and HW for operation
9287 * @adapter: the adapter
9288 * @buf: temporary space of at least VPD_LEN size provided by the caller.
9290 * Initialize adapter SW state for the various HW modules, set initial
9291 * values for some adapter tunables, take PHYs out of reset, and
9292 * initialize the MDIO interface.
9294 int t4_prep_adapter(struct adapter *adapter, u32 *buf)
9300 get_pci_mode(adapter, &adapter->params.pci);
9302 pl_rev = t4_read_reg(adapter, A_PL_REV);
9303 adapter->params.chipid = G_CHIPID(pl_rev);
9304 adapter->params.rev = G_REV(pl_rev);
9305 if (adapter->params.chipid == 0) {
9306 /* T4 did not have chipid in PL_REV (T5 onwards do) */
9307 adapter->params.chipid = CHELSIO_T4;
9309 /* T4A1 chip is not supported */
9310 if (adapter->params.rev == 1) {
9311 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
9316 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
9317 if (adapter->chip_params == NULL)
9320 adapter->params.pci.vpd_cap_addr =
9321 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
9323 ret = t4_get_flash_params(adapter);
9327 /* Cards with real ASICs have the chipid in the PCIe device id */
9328 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
9329 if (device_id >> 12 == chip_id(adapter))
9330 adapter->params.cim_la_size = CIMLA_SIZE;
9333 adapter->params.fpga = 1;
9334 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
9337 ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
9341 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9344 * Default port and clock for debugging in case we can't reach FW.
9346 adapter->params.nports = 1;
9347 adapter->params.portvec = 1;
9348 adapter->params.vpd.cclk = 50000;
9350 /* Set pci completion timeout value to 4 seconds. */
9351 set_pcie_completion_timeout(adapter, 0xd);
9356 * t4_shutdown_adapter - shut down adapter, host & wire
9357 * @adapter: the adapter
9359 * Perform an emergency shutdown of the adapter and stop it from
9360 * continuing any further communication on the ports or DMA to the
9361 * host. This is typically used when the adapter and/or firmware
9362 * have crashed and we want to prevent any further accidental
9363 * communication with the rest of the world. This will also force
9364 * the port Link Status to go down -- if register writes work --
9365 * which should help our peers figure out that we're down.
9367 int t4_shutdown_adapter(struct adapter *adapter)
9371 t4_intr_disable(adapter);
9372 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
9373 for_each_port(adapter, port) {
9374 u32 a_port_cfg = is_t4(adapter) ?
9375 PORT_REG(port, A_XGMAC_PORT_CFG) :
9376 T5_PORT_REG(port, A_MAC_PORT_CFG);
9378 t4_write_reg(adapter, a_port_cfg,
9379 t4_read_reg(adapter, a_port_cfg)
9380 & ~V_SIGNAL_DET(1));
9382 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
9388 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9389 * @adapter: the adapter
9390 * @qid: the Queue ID
9391 * @qtype: the Ingress or Egress type for @qid
9392 * @user: true if this request is for a user mode queue
9393 * @pbar2_qoffset: BAR2 Queue Offset
9394 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9396 * Returns the BAR2 SGE Queue Registers information associated with the
9397 * indicated Absolute Queue ID. These are passed back in return value
9398 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9399 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9401 * This may return an error which indicates that BAR2 SGE Queue
9402 * registers aren't available. If an error is not returned, then the
9403 * following values are returned:
9405 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9406 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9408 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9409 * require the "Inferred Queue ID" ability may be used. E.g. the
9410 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9411 * then these "Inferred Queue ID" register may not be used.
9413 int t4_bar2_sge_qregs(struct adapter *adapter,
9415 enum t4_bar2_qtype qtype,
9418 unsigned int *pbar2_qid)
9420 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9421 u64 bar2_page_offset, bar2_qoffset;
9422 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9424 /* T4 doesn't support BAR2 SGE Queue registers for kernel
9427 if (!user && is_t4(adapter))
9430 /* Get our SGE Page Size parameters.
9432 page_shift = adapter->params.sge.page_shift;
9433 page_size = 1 << page_shift;
9435 /* Get the right Queues per Page parameters for our Queue.
9437 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9438 ? adapter->params.sge.eq_s_qpp
9439 : adapter->params.sge.iq_s_qpp);
9440 qpp_mask = (1 << qpp_shift) - 1;
9442 /* Calculate the basics of the BAR2 SGE Queue register area:
9443 * o The BAR2 page the Queue registers will be in.
9444 * o The BAR2 Queue ID.
9445 * o The BAR2 Queue ID Offset into the BAR2 page.
9447 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9448 bar2_qid = qid & qpp_mask;
9449 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9451 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
9452 * hardware will infer the Absolute Queue ID simply from the writes to
9453 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9454 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
9455 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9456 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9457 * from the BAR2 Page and BAR2 Queue ID.
9459 * One important censequence of this is that some BAR2 SGE registers
9460 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9461 * there. But other registers synthesize the SGE Queue ID purely
9462 * from the writes to the registers -- the Write Combined Doorbell
9463 * Buffer is a good example. These BAR2 SGE Registers are only
9464 * available for those BAR2 SGE Register areas where the SGE Absolute
9465 * Queue ID can be inferred from simple writes.
9467 bar2_qoffset = bar2_page_offset;
9468 bar2_qinferred = (bar2_qid_offset < page_size);
9469 if (bar2_qinferred) {
9470 bar2_qoffset += bar2_qid_offset;
9474 *pbar2_qoffset = bar2_qoffset;
9475 *pbar2_qid = bar2_qid;
9480 * t4_init_devlog_params - initialize adapter->params.devlog
9481 * @adap: the adapter
9482 * @fw_attach: whether we can talk to the firmware
9484 * Initialize various fields of the adapter's Firmware Device Log
9485 * Parameters structure.
9487 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
9489 struct devlog_params *dparams = &adap->params.devlog;
9491 unsigned int devlog_meminfo;
9492 struct fw_devlog_cmd devlog_cmd;
9495 /* If we're dealing with newer firmware, the Device Log Paramerters
9496 * are stored in a designated register which allows us to access the
9497 * Device Log even if we can't talk to the firmware.
9500 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
9502 unsigned int nentries, nentries128;
9504 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
9505 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
9507 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
9508 nentries = (nentries128 + 1) * 128;
9509 dparams->size = nentries * sizeof(struct fw_devlog_e);
9515 * For any failing returns ...
9517 memset(dparams, 0, sizeof *dparams);
9520 * If we can't talk to the firmware, there's really nothing we can do
9526 /* Otherwise, ask the firmware for it's Device Log Parameters.
9528 memset(&devlog_cmd, 0, sizeof devlog_cmd);
9529 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9530 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9531 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9532 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9538 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9539 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
9540 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
9541 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9547 * t4_init_sge_params - initialize adap->params.sge
9548 * @adapter: the adapter
9550 * Initialize various fields of the adapter's SGE Parameters structure.
9552 int t4_init_sge_params(struct adapter *adapter)
9555 struct sge_params *sp = &adapter->params.sge;
9556 unsigned i, tscale = 1;
9558 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
9559 sp->counter_val[0] = G_THRESHOLD_0(r);
9560 sp->counter_val[1] = G_THRESHOLD_1(r);
9561 sp->counter_val[2] = G_THRESHOLD_2(r);
9562 sp->counter_val[3] = G_THRESHOLD_3(r);
9564 if (chip_id(adapter) >= CHELSIO_T6) {
9565 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
9566 tscale = G_TSCALE(r);
9573 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
9574 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
9575 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
9576 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
9577 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
9578 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
9579 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
9580 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
9581 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
9583 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
9584 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
9586 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
9587 else if (is_t5(adapter))
9588 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
9590 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
9592 /* egress queues: log2 of # of doorbells per BAR2 page */
9593 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
9594 r >>= S_QUEUESPERPAGEPF0 +
9595 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
9596 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
9598 /* ingress queues: log2 of # of doorbells per BAR2 page */
9599 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
9600 r >>= S_QUEUESPERPAGEPF0 +
9601 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
9602 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
9604 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
9605 r >>= S_HOSTPAGESIZEPF0 +
9606 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
9607 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
9609 r = t4_read_reg(adapter, A_SGE_CONTROL);
9610 sp->sge_control = r;
9611 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
9612 sp->fl_pktshift = G_PKTSHIFT(r);
9613 if (chip_id(adapter) <= CHELSIO_T5) {
9614 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
9615 X_INGPADBOUNDARY_SHIFT);
9617 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
9618 X_T6_INGPADBOUNDARY_SHIFT);
9621 sp->pack_boundary = sp->pad_boundary;
9623 r = t4_read_reg(adapter, A_SGE_CONTROL2);
9624 if (G_INGPACKBOUNDARY(r) == 0)
9625 sp->pack_boundary = 16;
9627 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
9629 for (i = 0; i < SGE_FLBUF_SIZES; i++)
9630 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
9631 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
9637 * Read and cache the adapter's compressed filter mode and ingress config.
9639 static void read_filter_mode_and_ingress_config(struct adapter *adap,
9643 struct tp_params *tpp = &adap->params.tp;
9645 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
9647 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
9651 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9652 * shift positions of several elements of the Compressed Filter Tuple
9653 * for this adapter which we need frequently ...
9655 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
9656 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
9657 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
9658 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
9659 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
9660 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
9661 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
9662 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
9663 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
9664 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
9666 if (chip_id(adap) > CHELSIO_T4) {
9667 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
9668 adap->params.tp.hash_filter_mask = v;
9669 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
9670 adap->params.tp.hash_filter_mask |= (u64)v << 32;
9675 * t4_init_tp_params - initialize adap->params.tp
9676 * @adap: the adapter
9678 * Initialize various fields of the adapter's TP Parameters structure.
9680 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9683 u32 tx_len, rx_len, r, v;
9684 struct tp_params *tpp = &adap->params.tp;
9686 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
9687 tpp->tre = G_TIMERRESOLUTION(v);
9688 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
9690 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9691 for (chan = 0; chan < MAX_NCHAN; chan++)
9692 tpp->tx_modq[chan] = chan;
9694 read_filter_mode_and_ingress_config(adap, sleep_ok);
9696 if (chip_id(adap) > CHELSIO_T5) {
9697 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
9698 tpp->rx_pkt_encap = v & F_CRXPKTENC;
9700 tpp->rx_pkt_encap = false;
9702 rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
9703 tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
9705 r = t4_read_reg(adap, A_TP_PARA_REG2);
9706 rx_len = min(rx_len, G_MAXRXDATA(r));
9707 tx_len = min(tx_len, G_MAXRXDATA(r));
9709 r = t4_read_reg(adap, A_TP_PARA_REG7);
9710 v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
9711 rx_len = min(rx_len, v);
9712 tx_len = min(tx_len, v);
9714 tpp->max_tx_pdu = tx_len;
9715 tpp->max_rx_pdu = rx_len;
9721 * t4_filter_field_shift - calculate filter field shift
9722 * @adap: the adapter
9723 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9725 * Return the shift position of a filter field within the Compressed
9726 * Filter Tuple. The filter field is specified via its selection bit
9727 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9729 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9731 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9735 if ((filter_mode & filter_sel) == 0)
9738 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9739 switch (filter_mode & sel) {
9741 field_shift += W_FT_FCOE;
9744 field_shift += W_FT_PORT;
9747 field_shift += W_FT_VNIC_ID;
9750 field_shift += W_FT_VLAN;
9753 field_shift += W_FT_TOS;
9756 field_shift += W_FT_PROTOCOL;
9759 field_shift += W_FT_ETHERTYPE;
9762 field_shift += W_FT_MACMATCH;
9765 field_shift += W_FT_MPSHITTYPE;
9767 case F_FRAGMENTATION:
9768 field_shift += W_FT_FRAGMENTATION;
9775 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
9779 struct port_info *p = adap2pinfo(adap, port_id);
9781 struct vi_info *vi = &p->vi[0];
9783 for (i = 0, j = -1; i <= p->port_id; i++) {
9786 } while ((adap->params.portvec & (1 << j)) == 0);
9790 p->mps_bg_map = t4_get_mps_bg_map(adap, j);
9791 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
9794 if (!(adap->flags & IS_VF) ||
9795 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
9796 t4_update_port_info(p);
9799 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
9800 &vi->vfvld, &vi->vin);
9805 t4_os_set_hw_addr(p, addr);
9807 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
9808 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
9809 V_FW_PARAMS_PARAM_YZ(vi->viid);
9810 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
9812 vi->rss_base = 0xffff;
9814 /* MPASS((val >> 16) == rss_size); */
9815 vi->rss_base = val & 0xffff;
9822 * t4_read_cimq_cfg - read CIM queue configuration
9823 * @adap: the adapter
9824 * @base: holds the queue base addresses in bytes
9825 * @size: holds the queue sizes in bytes
9826 * @thres: holds the queue full thresholds in bytes
9828 * Returns the current configuration of the CIM queues, starting with
9829 * the IBQs, then the OBQs.
9831 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9834 int cim_num_obq = adap->chip_params->cim_num_obq;
9836 for (i = 0; i < CIM_NUM_IBQ; i++) {
9837 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9839 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9840 /* value is in 256-byte units */
9841 *base++ = G_CIMQBASE(v) * 256;
9842 *size++ = G_CIMQSIZE(v) * 256;
9843 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
9845 for (i = 0; i < cim_num_obq; i++) {
9846 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9848 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9849 /* value is in 256-byte units */
9850 *base++ = G_CIMQBASE(v) * 256;
9851 *size++ = G_CIMQSIZE(v) * 256;
9856 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9857 * @adap: the adapter
9858 * @qid: the queue index
9859 * @data: where to store the queue contents
9860 * @n: capacity of @data in 32-bit words
9862 * Reads the contents of the selected CIM queue starting at address 0 up
9863 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9864 * error and the number of 32-bit words actually read on success.
9866 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9868 int i, err, attempts;
9870 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9872 if (qid > 5 || (n & 3))
9875 addr = qid * nwords;
9879 /* It might take 3-10ms before the IBQ debug read access is allowed.
9880 * Wait for 1 Sec with a delay of 1 usec.
9884 for (i = 0; i < n; i++, addr++) {
9885 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9887 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9891 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9893 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9898 * t4_read_cim_obq - read the contents of a CIM outbound queue
9899 * @adap: the adapter
9900 * @qid: the queue index
9901 * @data: where to store the queue contents
9902 * @n: capacity of @data in 32-bit words
9904 * Reads the contents of the selected CIM queue starting at address 0 up
9905 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9906 * error and the number of 32-bit words actually read on success.
9908 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9911 unsigned int addr, v, nwords;
9912 int cim_num_obq = adap->chip_params->cim_num_obq;
9914 if ((qid > (cim_num_obq - 1)) || (n & 3))
9917 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9918 V_QUENUMSELECT(qid));
9919 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9921 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
9922 nwords = G_CIMQSIZE(v) * 64; /* same */
9926 for (i = 0; i < n; i++, addr++) {
9927 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
9929 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9933 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
9935 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
9941 CIM_CTL_BASE = 0x2000,
9942 CIM_PBT_ADDR_BASE = 0x2800,
9943 CIM_PBT_LRF_BASE = 0x3000,
9944 CIM_PBT_DATA_BASE = 0x3800
9948 * t4_cim_read - read a block from CIM internal address space
9949 * @adap: the adapter
9950 * @addr: the start address within the CIM address space
9951 * @n: number of words to read
9952 * @valp: where to store the result
9954 * Reads a block of 4-byte words from the CIM intenal address space.
9956 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9961 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9964 for ( ; !ret && n--; addr += 4) {
9965 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9966 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9969 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9975 * t4_cim_write - write a block into CIM internal address space
9976 * @adap: the adapter
9977 * @addr: the start address within the CIM address space
9978 * @n: number of words to write
9979 * @valp: set of values to write
9981 * Writes a block of 4-byte words into the CIM intenal address space.
9983 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9984 const unsigned int *valp)
9988 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9991 for ( ; !ret && n--; addr += 4) {
9992 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9993 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9994 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10000 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
10003 return t4_cim_write(adap, addr, 1, &val);
10007 * t4_cim_ctl_read - read a block from CIM control region
10008 * @adap: the adapter
10009 * @addr: the start address within the CIM control region
10010 * @n: number of words to read
10011 * @valp: where to store the result
10013 * Reads a block of 4-byte words from the CIM control region.
10015 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
10016 unsigned int *valp)
10018 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
10022 * t4_cim_read_la - read CIM LA capture buffer
10023 * @adap: the adapter
10024 * @la_buf: where to store the LA data
10025 * @wrptr: the HW write pointer within the capture buffer
10027 * Reads the contents of the CIM LA buffer with the most recent entry at
10028 * the end of the returned data and with the entry at @wrptr first.
10029 * We try to leave the LA in the running state we find it in.
10031 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
10034 unsigned int cfg, val, idx;
10036 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
10040 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
10041 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
10046 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10050 idx = G_UPDBGLAWRPTR(val);
10054 for (i = 0; i < adap->params.cim_la_size; i++) {
10055 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
10056 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
10059 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10062 if (val & F_UPDBGLARDEN) {
10066 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
10070 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
10071 * identify the 32-bit portion of the full 312-bit data
10073 if (is_t6(adap) && (idx & 0xf) >= 9)
10074 idx = (idx & 0xff0) + 0x10;
10077 /* address can't exceed 0xfff */
10078 idx &= M_UPDBGLARDPTR;
10081 if (cfg & F_UPDBGLAEN) {
10082 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
10083 cfg & ~F_UPDBGLARDEN);
10091 * t4_tp_read_la - read TP LA capture buffer
10092 * @adap: the adapter
10093 * @la_buf: where to store the LA data
10094 * @wrptr: the HW write pointer within the capture buffer
10096 * Reads the contents of the TP LA buffer with the most recent entry at
10097 * the end of the returned data and with the entry at @wrptr first.
10098 * We leave the LA in the running state we find it in.
10100 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10102 bool last_incomplete;
10103 unsigned int i, cfg, val, idx;
10105 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
10106 if (cfg & F_DBGLAENABLE) /* freeze LA */
10107 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10108 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
10110 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
10111 idx = G_DBGLAWPTR(val);
10112 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
10113 if (last_incomplete)
10114 idx = (idx + 1) & M_DBGLARPTR;
10119 val &= ~V_DBGLARPTR(M_DBGLARPTR);
10120 val |= adap->params.tp.la_mask;
10122 for (i = 0; i < TPLA_SIZE; i++) {
10123 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
10124 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
10125 idx = (idx + 1) & M_DBGLARPTR;
10128 /* Wipe out last entry if it isn't valid */
10129 if (last_incomplete)
10130 la_buf[TPLA_SIZE - 1] = ~0ULL;
10132 if (cfg & F_DBGLAENABLE) /* restore running state */
10133 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10134 cfg | adap->params.tp.la_mask);
10138 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10139 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
10140 * state for more than the Warning Threshold then we'll issue a warning about
10141 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
10142 * appears to be hung every Warning Repeat second till the situation clears.
10143 * If the situation clears, we'll note that as well.
10145 #define SGE_IDMA_WARN_THRESH 1
10146 #define SGE_IDMA_WARN_REPEAT 300
10149 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10150 * @adapter: the adapter
10151 * @idma: the adapter IDMA Monitor state
10153 * Initialize the state of an SGE Ingress DMA Monitor.
10155 void t4_idma_monitor_init(struct adapter *adapter,
10156 struct sge_idma_monitor_state *idma)
10158 /* Initialize the state variables for detecting an SGE Ingress DMA
10159 * hang. The SGE has internal counters which count up on each clock
10160 * tick whenever the SGE finds its Ingress DMA State Engines in the
10161 * same state they were on the previous clock tick. The clock used is
10162 * the Core Clock so we have a limit on the maximum "time" they can
10163 * record; typically a very small number of seconds. For instance,
10164 * with a 600MHz Core Clock, we can only count up to a bit more than
10165 * 7s. So we'll synthesize a larger counter in order to not run the
10166 * risk of having the "timers" overflow and give us the flexibility to
10167 * maintain a Hung SGE State Machine of our own which operates across
10168 * a longer time frame.
10170 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10171 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
10175 * t4_idma_monitor - monitor SGE Ingress DMA state
10176 * @adapter: the adapter
10177 * @idma: the adapter IDMA Monitor state
10178 * @hz: number of ticks/second
10179 * @ticks: number of ticks since the last IDMA Monitor call
10181 void t4_idma_monitor(struct adapter *adapter,
10182 struct sge_idma_monitor_state *idma,
10185 int i, idma_same_state_cnt[2];
10187 /* Read the SGE Debug Ingress DMA Same State Count registers. These
10188 * are counters inside the SGE which count up on each clock when the
10189 * SGE finds its Ingress DMA State Engines in the same states they
10190 * were in the previous clock. The counters will peg out at
10191 * 0xffffffff without wrapping around so once they pass the 1s
10192 * threshold they'll stay above that till the IDMA state changes.
10194 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
10195 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
10196 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10198 for (i = 0; i < 2; i++) {
10199 u32 debug0, debug11;
10201 /* If the Ingress DMA Same State Counter ("timer") is less
10202 * than 1s, then we can reset our synthesized Stall Timer and
10203 * continue. If we have previously emitted warnings about a
10204 * potential stalled Ingress Queue, issue a note indicating
10205 * that the Ingress Queue has resumed forward progress.
10207 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10208 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
10209 CH_WARN(adapter, "SGE idma%d, queue %u, "
10210 "resumed after %d seconds\n",
10211 i, idma->idma_qid[i],
10212 idma->idma_stalled[i]/hz);
10213 idma->idma_stalled[i] = 0;
10217 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10218 * domain. The first time we get here it'll be because we
10219 * passed the 1s Threshold; each additional time it'll be
10220 * because the RX Timer Callback is being fired on its regular
10223 * If the stall is below our Potential Hung Ingress Queue
10224 * Warning Threshold, continue.
10226 if (idma->idma_stalled[i] == 0) {
10227 idma->idma_stalled[i] = hz;
10228 idma->idma_warn[i] = 0;
10230 idma->idma_stalled[i] += ticks;
10231 idma->idma_warn[i] -= ticks;
10234 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
10237 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10239 if (idma->idma_warn[i] > 0)
10241 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
10243 /* Read and save the SGE IDMA State and Queue ID information.
10244 * We do this every time in case it changes across time ...
10245 * can't be too careful ...
10247 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
10248 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10249 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10251 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
10252 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10253 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10255 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
10256 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10257 i, idma->idma_qid[i], idma->idma_state[i],
10258 idma->idma_stalled[i]/hz,
10260 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10265 * t4_set_vf_mac - Set MAC address for the specified VF
10266 * @adapter: The adapter
10267 * @pf: the PF used to instantiate the VFs
10268 * @vf: one of the VFs instantiated by the specified PF
10269 * @naddr: the number of MAC addresses
10270 * @addr: the MAC address(es) to be set to the specified VF
10272 int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
10273 unsigned int naddr, u8 *addr)
10275 struct fw_acl_mac_cmd cmd;
10277 memset(&cmd, 0, sizeof(cmd));
10278 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
10281 V_FW_ACL_MAC_CMD_PFN(pf) |
10282 V_FW_ACL_MAC_CMD_VFN(vf));
10284 /* Note: Do not enable the ACL */
10285 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10290 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10293 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10296 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10299 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10303 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10307 * t4_read_pace_tbl - read the pace table
10308 * @adap: the adapter
10309 * @pace_vals: holds the returned values
10311 * Returns the values of TP's pace table in microseconds.
10313 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10317 for (i = 0; i < NTX_SCHED; i++) {
10318 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
10319 v = t4_read_reg(adap, A_TP_PACE_TABLE);
10320 pace_vals[i] = dack_ticks_to_usec(adap, v);
10325 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10326 * @adap: the adapter
10327 * @sched: the scheduler index
10328 * @kbps: the byte rate in Kbps
10329 * @ipg: the interpacket delay in tenths of nanoseconds
10331 * Return the current configuration of a HW Tx scheduler.
10333 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
10334 unsigned int *ipg, bool sleep_ok)
10336 unsigned int v, addr, bpt, cpt;
10339 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
10340 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10343 bpt = (v >> 8) & 0xff;
10346 *kbps = 0; /* scheduler disabled */
10348 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10349 *kbps = (v * bpt) / 125;
10353 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
10354 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10358 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10363 * t4_load_cfg - download config file
10364 * @adap: the adapter
10365 * @cfg_data: the cfg text file to write
10366 * @size: text file size
10368 * Write the supplied config text file to the card's serial flash.
10370 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10372 int ret, i, n, cfg_addr;
10374 unsigned int flash_cfg_start_sec;
10375 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10377 cfg_addr = t4_flash_cfg_addr(adap);
10382 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10384 if (size > FLASH_CFG_MAX_SIZE) {
10385 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
10386 FLASH_CFG_MAX_SIZE);
10390 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10392 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10393 flash_cfg_start_sec + i - 1);
10395 * If size == 0 then we're simply erasing the FLASH sectors associated
10396 * with the on-adapter Firmware Configuration File.
10398 if (ret || size == 0)
10401 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10402 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10403 if ( (size - i) < SF_PAGE_SIZE)
10407 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
10411 addr += SF_PAGE_SIZE;
10412 cfg_data += SF_PAGE_SIZE;
10417 CH_ERR(adap, "config file %s failed %d\n",
10418 (size == 0 ? "clear" : "download"), ret);
10423 * t5_fw_init_extern_mem - initialize the external memory
10424 * @adap: the adapter
10426 * Initializes the external memory on T5.
10428 int t5_fw_init_extern_mem(struct adapter *adap)
10430 u32 params[1], val[1];
10436 val[0] = 0xff; /* Initialize all MCs */
10437 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10438 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10439 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10440 FW_CMD_MAX_TIMEOUT);
10445 /* BIOS boot headers */
10446 typedef struct pci_expansion_rom_header {
10447 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
10448 u8 reserved[22]; /* Reserved per processor Architecture data */
10449 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
10450 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
10452 /* Legacy PCI Expansion ROM Header */
10453 typedef struct legacy_pci_expansion_rom_header {
10454 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
10455 u8 size512; /* Current Image Size in units of 512 bytes */
10456 u8 initentry_point[4];
10457 u8 cksum; /* Checksum computed on the entire Image */
10458 u8 reserved[16]; /* Reserved */
10459 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
10460 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
10462 /* EFI PCI Expansion ROM Header */
10463 typedef struct efi_pci_expansion_rom_header {
10464 u8 signature[2]; // ROM signature. The value 0xaa55
10465 u8 initialization_size[2]; /* Units 512. Includes this header */
10466 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
10467 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
10468 u8 efi_machine_type[2]; /* Machine type from EFI image header */
10469 u8 compression_type[2]; /* Compression type. */
10471 * Compression type definition
10472 * 0x0: uncompressed
10474 * 0x2-0xFFFF: Reserved
10476 u8 reserved[8]; /* Reserved */
10477 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
10478 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
10479 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
10481 /* PCI Data Structure Format */
10482 typedef struct pcir_data_structure { /* PCI Data Structure */
10483 u8 signature[4]; /* Signature. The string "PCIR" */
10484 u8 vendor_id[2]; /* Vendor Identification */
10485 u8 device_id[2]; /* Device Identification */
10486 u8 vital_product[2]; /* Pointer to Vital Product Data */
10487 u8 length[2]; /* PCIR Data Structure Length */
10488 u8 revision; /* PCIR Data Structure Revision */
10489 u8 class_code[3]; /* Class Code */
10490 u8 image_length[2]; /* Image Length. Multiple of 512B */
10491 u8 code_revision[2]; /* Revision Level of Code/Data */
10492 u8 code_type; /* Code Type. */
10494 * PCI Expansion ROM Code Types
10495 * 0x00: Intel IA-32, PC-AT compatible. Legacy
10496 * 0x01: Open Firmware standard for PCI. FCODE
10497 * 0x02: Hewlett-Packard PA RISC. HP reserved
10498 * 0x03: EFI Image. EFI
10499 * 0x04-0xFF: Reserved.
10501 u8 indicator; /* Indicator. Identifies the last image in the ROM */
10502 u8 reserved[2]; /* Reserved */
10503 } pcir_data_t; /* PCI__DATA_STRUCTURE */
10505 /* BOOT constants */
10507 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
10508 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
10509 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
10510 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
10511 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
10512 VENDOR_ID = 0x1425, /* Vendor ID */
10513 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
10517 * modify_device_id - Modifies the device ID of the Boot BIOS image
10518 * @adatper: the device ID to write.
10519 * @boot_data: the boot image to modify.
10521 * Write the supplied device ID to the boot BIOS image.
10523 static void modify_device_id(int device_id, u8 *boot_data)
10525 legacy_pci_exp_rom_header_t *header;
10526 pcir_data_t *pcir_header;
10527 u32 cur_header = 0;
10530 * Loop through all chained images and change the device ID's
10533 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
10534 pcir_header = (pcir_data_t *) &boot_data[cur_header +
10535 le16_to_cpu(*(u16*)header->pcir_offset)];
10538 * Only modify the Device ID if code type is Legacy or HP.
10539 * 0x00: Okay to modify
10540 * 0x01: FCODE. Do not be modify
10541 * 0x03: Okay to modify
10542 * 0x04-0xFF: Do not modify
10544 if (pcir_header->code_type == 0x00) {
10549 * Modify Device ID to match current adatper
10551 *(u16*) pcir_header->device_id = device_id;
10554 * Set checksum temporarily to 0.
10555 * We will recalculate it later.
10557 header->cksum = 0x0;
10560 * Calculate and update checksum
10562 for (i = 0; i < (header->size512 * 512); i++)
10563 csum += (u8)boot_data[cur_header + i];
10566 * Invert summed value to create the checksum
10567 * Writing new checksum value directly to the boot data
10569 boot_data[cur_header + 7] = -csum;
10571 } else if (pcir_header->code_type == 0x03) {
10574 * Modify Device ID to match current adatper
10576 *(u16*) pcir_header->device_id = device_id;
10582 * Check indicator element to identify if this is the last
10583 * image in the ROM.
10585 if (pcir_header->indicator & 0x80)
10589 * Move header pointer up to the next image in the ROM.
10591 cur_header += header->size512 * 512;
10596 * t4_load_boot - download boot flash
10597 * @adapter: the adapter
10598 * @boot_data: the boot image to write
10599 * @boot_addr: offset in flash to write boot_data
10600 * @size: image size
10602 * Write the supplied boot image to the card's serial flash.
10603 * The boot image has the following sections: a 28-byte header and the
10606 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10607 unsigned int boot_addr, unsigned int size)
10609 pci_exp_rom_header_t *header;
10611 pcir_data_t *pcir_header;
10613 uint16_t device_id;
10615 unsigned int boot_sector = (boot_addr * 1024 );
10616 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10619 * Make sure the boot image does not encroach on the firmware region
10621 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10622 CH_ERR(adap, "boot image encroaching on firmware region\n");
10627 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10628 * and Boot configuration data sections. These 3 boot sections span
10629 * sectors 0 to 7 in flash and live right before the FW image location.
10631 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
10633 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10634 (boot_sector >> 16) + i - 1);
10637 * If size == 0 then we're simply erasing the FLASH sectors associated
10638 * with the on-adapter option ROM file
10640 if (ret || (size == 0))
10643 /* Get boot header */
10644 header = (pci_exp_rom_header_t *)boot_data;
10645 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
10646 /* PCIR Data Structure */
10647 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
10650 * Perform some primitive sanity testing to avoid accidentally
10651 * writing garbage over the boot sectors. We ought to check for
10652 * more but it's not worth it for now ...
10654 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10655 CH_ERR(adap, "boot image too small/large\n");
10659 #ifndef CHELSIO_T4_DIAGS
10661 * Check BOOT ROM header signature
10663 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
10664 CH_ERR(adap, "Boot image missing signature\n");
10669 * Check PCI header signature
10671 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
10672 CH_ERR(adap, "PCI header missing signature\n");
10677 * Check Vendor ID matches Chelsio ID
10679 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
10680 CH_ERR(adap, "Vendor ID missing signature\n");
10686 * Retrieve adapter's device ID
10688 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10689 /* Want to deal with PF 0 so I strip off PF 4 indicator */
10690 device_id = device_id & 0xf0ff;
10693 * Check PCIE Device ID
10695 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
10697 * Change the device ID in the Boot BIOS image to match
10698 * the Device ID of the current adapter.
10700 modify_device_id(device_id, boot_data);
10704 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10705 * we finish copying the rest of the boot image. This will ensure
10706 * that the BIOS boot header will only be written if the boot image
10707 * was written in full.
10709 addr = boot_sector;
10710 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10711 addr += SF_PAGE_SIZE;
10712 boot_data += SF_PAGE_SIZE;
10713 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
10718 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10719 (const u8 *)header, 0);
10723 CH_ERR(adap, "boot image download failed, error %d\n", ret);
10728 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10729 * @adapter: the adapter
10731 * Return the address within the flash where the OptionROM Configuration
10732 * is stored, or an error if the device FLASH is too small to contain
10733 * a OptionROM Configuration.
10735 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10738 * If the device FLASH isn't large enough to hold a Firmware
10739 * Configuration File, return an error.
10741 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10744 return FLASH_BOOTCFG_START;
10747 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10749 int ret, i, n, cfg_addr;
10751 unsigned int flash_cfg_start_sec;
10752 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10754 cfg_addr = t4_flash_bootcfg_addr(adap);
10759 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10761 if (size > FLASH_BOOTCFG_MAX_SIZE) {
10762 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10763 FLASH_BOOTCFG_MAX_SIZE);
10767 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10769 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10770 flash_cfg_start_sec + i - 1);
10773 * If size == 0 then we're simply erasing the FLASH sectors associated
10774 * with the on-adapter OptionROM Configuration File.
10776 if (ret || size == 0)
10779 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10780 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10781 if ( (size - i) < SF_PAGE_SIZE)
10785 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10789 addr += SF_PAGE_SIZE;
10790 cfg_data += SF_PAGE_SIZE;
10795 CH_ERR(adap, "boot config data %s failed %d\n",
10796 (size == 0 ? "clear" : "download"), ret);
10801 * t4_set_filter_mode - configure the optional components of filter tuples
10802 * @adap: the adapter
10803 * @mode_map: a bitmap selcting which optional filter components to enable
10804 * @sleep_ok: if true we may sleep while awaiting command completion
10806 * Sets the filter mode by selecting the optional components to enable
10807 * in filter tuples. Returns 0 on success and a negative error if the
10808 * requested mode needs more bits than are available for optional
10811 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10814 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
10818 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10819 if (mode_map & (1 << i))
10821 if (nbits > FILTER_OPT_LEN)
10823 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10824 read_filter_mode_and_ingress_config(adap, sleep_ok);
10830 * t4_clr_port_stats - clear port statistics
10831 * @adap: the adapter
10832 * @idx: the port index
10834 * Clear HW statistics for the given port.
10836 void t4_clr_port_stats(struct adapter *adap, int idx)
10839 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
10840 u32 port_base_addr;
10843 port_base_addr = PORT_BASE(idx);
10845 port_base_addr = T5_PORT_BASE(idx);
10847 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10848 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10849 t4_write_reg(adap, port_base_addr + i, 0);
10850 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10851 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10852 t4_write_reg(adap, port_base_addr + i, 0);
10853 for (i = 0; i < 4; i++)
10854 if (bgmap & (1 << i)) {
10856 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10858 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10863 * t4_i2c_io - read/write I2C data from adapter
10864 * @adap: the adapter
10865 * @port: Port number if per-port device; <0 if not
10866 * @devid: per-port device ID or absolute device ID
10867 * @offset: byte offset into device I2C space
10868 * @len: byte length of I2C space data
10869 * @buf: buffer in which to return I2C data for read
10870 * buffer which holds the I2C data for write
10871 * @write: if true, do a write; else do a read
10872 * Reads/Writes the I2C data from/to the indicated device and location.
10874 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
10875 int port, unsigned int devid,
10876 unsigned int offset, unsigned int len,
10877 u8 *buf, bool write)
10879 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10880 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10883 if (len > I2C_PAGE_SIZE)
10886 /* Dont allow reads that spans multiple pages */
10887 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10890 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10891 ldst_cmd.op_to_addrspace =
10892 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10894 (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
10895 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
10896 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10897 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10898 ldst_cmd.u.i2c.did = devid;
10901 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10903 ldst_cmd.u.i2c.boffset = offset;
10904 ldst_cmd.u.i2c.blen = i2c_len;
10907 memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
10909 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10910 write ? NULL : &ldst_rpl);
10915 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10924 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10925 int port, unsigned int devid,
10926 unsigned int offset, unsigned int len,
10929 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
10932 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10933 int port, unsigned int devid,
10934 unsigned int offset, unsigned int len,
10937 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
10941 * t4_sge_ctxt_rd - read an SGE context through FW
10942 * @adap: the adapter
10943 * @mbox: mailbox to use for the FW command
10944 * @cid: the context id
10945 * @ctype: the context type
10946 * @data: where to store the context data
10948 * Issues a FW command through the given mailbox to read an SGE context.
10950 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10951 enum ctxt_type ctype, u32 *data)
10954 struct fw_ldst_cmd c;
10956 if (ctype == CTXT_EGRESS)
10957 ret = FW_LDST_ADDRSPC_SGE_EGRC;
10958 else if (ctype == CTXT_INGRESS)
10959 ret = FW_LDST_ADDRSPC_SGE_INGC;
10960 else if (ctype == CTXT_FLM)
10961 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10963 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10965 memset(&c, 0, sizeof(c));
10966 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10967 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10968 V_FW_LDST_CMD_ADDRSPACE(ret));
10969 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10970 c.u.idctxt.physid = cpu_to_be32(cid);
10972 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10974 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10975 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10976 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10977 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10978 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10979 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10985 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10986 * @adap: the adapter
10987 * @cid: the context id
10988 * @ctype: the context type
10989 * @data: where to store the context data
10991 * Reads an SGE context directly, bypassing FW. This is only for
10992 * debugging when FW is unavailable.
10994 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10999 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
11000 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
11002 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
11003 *data++ = t4_read_reg(adap, i);
11007 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
11010 struct fw_sched_cmd cmd;
11012 memset(&cmd, 0, sizeof(cmd));
11013 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11016 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11018 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
11019 cmd.u.config.type = type;
11020 cmd.u.config.minmaxen = minmaxen;
11022 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11026 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
11027 int rateunit, int ratemode, int channel, int cl,
11028 int minrate, int maxrate, int weight, int pktsize,
11029 int burstsize, int sleep_ok)
11031 struct fw_sched_cmd cmd;
11033 memset(&cmd, 0, sizeof(cmd));
11034 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11037 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11039 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11040 cmd.u.params.type = type;
11041 cmd.u.params.level = level;
11042 cmd.u.params.mode = mode;
11043 cmd.u.params.ch = channel;
11044 cmd.u.params.cl = cl;
11045 cmd.u.params.unit = rateunit;
11046 cmd.u.params.rate = ratemode;
11047 cmd.u.params.min = cpu_to_be32(minrate);
11048 cmd.u.params.max = cpu_to_be32(maxrate);
11049 cmd.u.params.weight = cpu_to_be16(weight);
11050 cmd.u.params.pktsize = cpu_to_be16(pktsize);
11051 cmd.u.params.burstsize = cpu_to_be16(burstsize);
11053 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11057 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
11058 unsigned int maxrate, int sleep_ok)
11060 struct fw_sched_cmd cmd;
11062 memset(&cmd, 0, sizeof(cmd));
11063 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11066 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11068 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11069 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11070 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
11071 cmd.u.params.ch = channel;
11072 cmd.u.params.rate = ratemode; /* REL or ABS */
11073 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
11075 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11079 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
11080 int weight, int sleep_ok)
11082 struct fw_sched_cmd cmd;
11084 if (weight < 0 || weight > 100)
11087 memset(&cmd, 0, sizeof(cmd));
11088 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11091 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11093 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11094 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11095 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
11096 cmd.u.params.ch = channel;
11097 cmd.u.params.cl = cl;
11098 cmd.u.params.weight = cpu_to_be16(weight);
11100 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11104 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
11105 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
11107 struct fw_sched_cmd cmd;
11109 memset(&cmd, 0, sizeof(cmd));
11110 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11113 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11115 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11116 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11117 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
11118 cmd.u.params.mode = mode;
11119 cmd.u.params.ch = channel;
11120 cmd.u.params.cl = cl;
11121 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
11122 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
11123 cmd.u.params.max = cpu_to_be32(maxrate);
11124 cmd.u.params.pktsize = cpu_to_be16(pktsize);
11126 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
11131 * t4_config_watchdog - configure (enable/disable) a watchdog timer
11132 * @adapter: the adapter
11133 * @mbox: mailbox to use for the FW command
11134 * @pf: the PF owning the queue
11135 * @vf: the VF owning the queue
11136 * @timeout: watchdog timeout in ms
11137 * @action: watchdog timer / action
11139 * There are separate watchdog timers for each possible watchdog
11140 * action. Configure one of the watchdog timers by setting a non-zero
11141 * timeout. Disable a watchdog timer by using a timeout of zero.
11143 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
11144 unsigned int pf, unsigned int vf,
11145 unsigned int timeout, unsigned int action)
11147 struct fw_watchdog_cmd wdog;
11148 unsigned int ticks;
11151 * The watchdog command expects a timeout in units of 10ms so we need
11152 * to convert it here (via rounding) and force a minimum of one 10ms
11153 * "tick" if the timeout is non-zero but the conversion results in 0
11156 ticks = (timeout + 5)/10;
11157 if (timeout && !ticks)
11160 memset(&wdog, 0, sizeof wdog);
11161 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
11164 V_FW_PARAMS_CMD_PFN(pf) |
11165 V_FW_PARAMS_CMD_VFN(vf));
11166 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
11167 wdog.timeout = cpu_to_be32(ticks);
11168 wdog.action = cpu_to_be32(action);
11170 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
11173 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
11175 struct fw_devlog_cmd devlog_cmd;
11178 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11179 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11180 F_FW_CMD_REQUEST | F_FW_CMD_READ);
11181 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11182 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11183 sizeof(devlog_cmd), &devlog_cmd);
11187 *level = devlog_cmd.level;
11191 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
11193 struct fw_devlog_cmd devlog_cmd;
11195 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11196 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11199 devlog_cmd.level = level;
11200 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11201 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11202 sizeof(devlog_cmd), &devlog_cmd);
11205 int t4_configure_add_smac(struct adapter *adap)
11207 unsigned int param, val;
11210 adap->params.smac_add_support = 0;
11211 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11212 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
11213 /* Query FW to check if FW supports adding source mac address
11214 * to TCAM feature or not.
11215 * If FW returns 1, driver can use this feature and driver need to send
11216 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
11217 * enable adding smac to TCAM.
11219 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
11224 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
11227 /* Firmware allows adding explicit TCAM entries.
11228 * Save this internally.
11230 adap->params.smac_add_support = 1;
11236 int t4_configure_ringbb(struct adapter *adap)
11238 unsigned int param, val;
11241 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11242 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
11243 /* Query FW to check if FW supports ring switch feature or not.
11244 * If FW returns 1, driver can use this feature and driver need to send
11245 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
11246 * enable the ring backbone configuration.
11248 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
11250 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
11256 CH_ERR(adap, "FW doesnot support ringbackbone features\n");
11260 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
11262 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
11272 * t4_set_vlan_acl - Set a VLAN id for the specified VF
11273 * @adapter: the adapter
11274 * @mbox: mailbox to use for the FW command
11275 * @vf: one of the VFs instantiated by the specified PF
11276 * @vlan: The vlanid to be set
11279 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
11282 struct fw_acl_vlan_cmd vlan_cmd;
11283 unsigned int enable;
11285 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
11286 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
11287 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
11291 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
11292 V_FW_ACL_VLAN_CMD_VFN(vf));
11293 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
11294 /* Drop all packets that donot match vlan id */
11295 vlan_cmd.dropnovlan_fm = (enable
11296 ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
11297 F_FW_ACL_VLAN_CMD_FM)
11300 vlan_cmd.nvlan = 1;
11301 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
11304 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
11308 * t4_del_mac - Removes the exact-match filter for a MAC address
11309 * @adap: the adapter
11310 * @mbox: mailbox to use for the FW command
11312 * @addr: the MAC address value
11313 * @smac: if true, delete from only the smac region of MPS
11315 * Modifies an exact-match filter and sets it to the new MAC address if
11316 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
11317 * latter case the address is added persistently if @persist is %true.
11319 * Returns a negative error number or the index of the filter with the new
11320 * MAC value. Note that this index may differ from @idx.
11322 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
11323 const u8 *addr, bool smac)
11326 struct fw_vi_mac_cmd c;
11327 struct fw_vi_mac_exact *p = c.u.exact;
11328 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
11330 memset(&c, 0, sizeof(c));
11331 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
11332 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
11333 V_FW_VI_MAC_CMD_VIID(viid));
11334 c.freemacs_to_len16 = cpu_to_be32(
11335 V_FW_CMD_LEN16(1) |
11336 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
11338 memcpy(p->macaddr, addr, sizeof(p->macaddr));
11339 p->valid_to_idx = cpu_to_be16(
11340 F_FW_VI_MAC_CMD_VALID |
11341 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
11343 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11345 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
11346 if (ret < max_mac_addr)
11354 * t4_add_mac - Adds an exact-match filter for a MAC address
11355 * @adap: the adapter
11356 * @mbox: mailbox to use for the FW command
11358 * @idx: index of existing filter for old value of MAC address, or -1
11359 * @addr: the new MAC address value
11360 * @persist: whether a new MAC allocation should be persistent
11361 * @add_smt: if true also add the address to the HW SMT
11362 * @smac: if true, update only the smac region of MPS
11364 * Modifies an exact-match filter and sets it to the new MAC address if
11365 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
11366 * latter case the address is added persistently if @persist is %true.
11368 * Returns a negative error number or the index of the filter with the new
11369 * MAC value. Note that this index may differ from @idx.
11371 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
11372 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
11375 struct fw_vi_mac_cmd c;
11376 struct fw_vi_mac_exact *p = c.u.exact;
11377 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
11379 if (idx < 0) /* new allocation */
11380 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
11381 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
11383 memset(&c, 0, sizeof(c));
11384 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
11385 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
11386 V_FW_VI_MAC_CMD_VIID(viid));
11387 c.freemacs_to_len16 = cpu_to_be32(
11388 V_FW_CMD_LEN16(1) |
11389 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
11390 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
11391 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
11392 V_FW_VI_MAC_CMD_IDX(idx));
11393 memcpy(p->macaddr, addr, sizeof(p->macaddr));
11395 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11397 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
11398 if (ret >= max_mac_addr)
11401 /* Does fw supports returning smt_idx? */
11402 if (adap->params.viid_smt_extn_support)
11403 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
11405 /* In T4/T5, SMT contains 256 SMAC entries
11406 * organized in 128 rows of 2 entries each.
11407 * In T6, SMT contains 256 SMAC entries in
11410 if (chip_id(adap) <= CHELSIO_T5)
11411 *smt_idx = ((viid & M_FW_VIID_VIN) << 1);
11413 *smt_idx = (viid & M_FW_VIID_VIN);