2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
39 #include "t4_regs_values.h"
40 #include "firmware/t4fw_interface.h"
/*
 * msleep() shim: sleep for @x milliseconds.  During early boot ("cold")
 * we cannot sleep, so busy-wait with DELAY() instead of pause().
 */
#define msleep(x) do { \
        if (cold) \
                DELAY((x) * 1000); \
        else \
                pause("t4hw", (x) * hz / 1000); \
} while (0)
51 * t4_wait_op_done_val - wait until an operation is completed
52 * @adapter: the adapter performing the operation
53 * @reg: the register to check for completion
54 * @mask: a single-bit field within @reg that indicates completion
55 * @polarity: the value of the field when the operation is completed
56 * @attempts: number of check iterations
57 * @delay: delay in usecs between iterations
58 * @valp: where to store the value of the register at completion time
60 * Wait until an operation is completed by checking a bit in a register
61 * up to @attempts times. If @valp is not NULL the value of the register
62 * at the time it indicated completion is stored there. Returns 0 if the
63 * operation completes and -EAGAIN otherwise.
65 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
66 int polarity, int attempts, int delay, u32 *valp)
69 u32 val = t4_read_reg(adapter, reg);
71 if (!!(val & mask) == polarity) {
83 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
84 int polarity, int attempts, int delay)
86 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
91 * t4_set_reg_field - set a register field to a value
92 * @adapter: the adapter to program
93 * @addr: the register address
94 * @mask: specifies the portion of the register to modify
95 * @val: the new value for the register field
97 * Sets a register field specified by the supplied mask to the
100 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
103 u32 v = t4_read_reg(adapter, addr) & ~mask;
105 t4_write_reg(adapter, addr, v | val);
106 (void) t4_read_reg(adapter, addr); /* flush */
110 * t4_read_indirect - read indirectly addressed registers
112 * @addr_reg: register holding the indirect address
113 * @data_reg: register holding the value of the indirect register
114 * @vals: where the read register values are stored
115 * @nregs: how many indirect registers to read
116 * @start_idx: index of first indirect register to read
118 * Reads registers that are accessed indirectly through an address/data
121 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
122 unsigned int data_reg, u32 *vals,
123 unsigned int nregs, unsigned int start_idx)
126 t4_write_reg(adap, addr_reg, start_idx);
127 *vals++ = t4_read_reg(adap, data_reg);
133 * t4_write_indirect - write indirectly addressed registers
135 * @addr_reg: register holding the indirect addresses
136 * @data_reg: register holding the value for the indirect registers
137 * @vals: values to write
138 * @nregs: how many indirect registers to write
139 * @start_idx: address of first indirect register to write
141 * Writes a sequential block of registers that are accessed indirectly
142 * through an address/data register pair.
144 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
145 unsigned int data_reg, const u32 *vals,
146 unsigned int nregs, unsigned int start_idx)
149 t4_write_reg(adap, addr_reg, start_idx++);
150 t4_write_reg(adap, data_reg, *vals++);
155 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
156 * mechanism. This guarantees that we get the real value even if we're
157 * operating within a Virtual Machine and the Hypervisor is trapping our
158 * Configuration Space accesses.
160 * N.B. This routine should only be used as a last resort: the firmware uses
161 * the backdoor registers on a regular basis and we can end up
162 * conflicting with it's uses!
164 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
166 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
169 if (chip_id(adap) <= CHELSIO_T5)
177 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
178 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
181 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
182 * Configuration Space read. (None of the other fields matter when
183 * F_ENABLE is 0 so a simple register write is easier than a
184 * read-modify-write via t4_set_reg_field().)
186 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
192 * t4_report_fw_error - report firmware error
195 * The adapter firmware can indicate error conditions to the host.
196 * If the firmware has indicated an error, print out the reason for
197 * the firmware error.
199 static void t4_report_fw_error(struct adapter *adap)
201 static const char *const reason[] = {
202 "Crash", /* PCIE_FW_EVAL_CRASH */
203 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
204 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
205 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
206 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
207 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
208 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
209 "Reserved", /* reserved */
213 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
214 if (pcie_fw & F_PCIE_FW_ERR) {
215 adap->flags &= ~FW_OK;
216 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
217 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
218 if (pcie_fw != 0xffffffff)
219 t4_os_dump_devlog(adap);
224 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
226 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
229 for ( ; nflit; nflit--, mbox_addr += 8)
230 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
234 * Handle a FW assertion reported in a mailbox.
236 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
239 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
240 asrt->u.assert.filename_0_7,
241 be32_to_cpu(asrt->u.assert.line),
242 be32_to_cpu(asrt->u.assert.x),
243 be32_to_cpu(asrt->u.assert.y));
246 struct port_tx_state {
252 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
254 uint32_t rx_pause_reg, tx_frames_reg;
257 tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
258 rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
260 tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
261 rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
264 tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
265 tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
269 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
274 read_tx_state_one(sc, i, &tx_state[i]);
278 check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
280 uint32_t port_ctl_reg;
281 uint64_t tx_frames, rx_pause;
284 for_each_port(sc, i) {
285 rx_pause = tx_state[i].rx_pause;
286 tx_frames = tx_state[i].tx_frames;
287 read_tx_state_one(sc, i, &tx_state[i]); /* update */
290 port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
292 port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
293 if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
294 rx_pause != tx_state[i].rx_pause &&
295 tx_frames == tx_state[i].tx_frames) {
296 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
298 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
303 #define X_CIM_PF_NOACCESS 0xeeeeeeee
305 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
307 * @mbox: index of the mailbox to use
308 * @cmd: the command to write
309 * @size: command length in bytes
310 * @rpl: where to optionally store the reply
311 * @sleep_ok: if true we may sleep while awaiting command completion
312 * @timeout: time to wait for command to finish before timing out
313 * (negative implies @sleep_ok=false)
315 * Sends the given command to FW through the selected mailbox and waits
316 * for the FW to execute the command. If @rpl is not %NULL it is used to
317 * store the FW's reply to the command. The command and its optional
318 * reply are of the same length. Some FW commands like RESET and
319 * INITIALIZE can take a considerable amount of time to execute.
320 * @sleep_ok determines whether we may sleep while awaiting the response.
321 * If sleeping is allowed we use progressive backoff otherwise we spin.
322 * Note that passing in a negative @timeout is an alternate mechanism
323 * for specifying @sleep_ok=false. This is useful when a higher level
324 * interface allows for specification of @timeout but not @sleep_ok ...
326 * The return value is 0 on success or a negative errno on failure. A
327 * failure can happen either because we are not able to execute the
328 * command or FW executes it but signals an error. In the latter case
329 * the return value is the error code indicated by FW (negated).
331 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
332 int size, void *rpl, bool sleep_ok, int timeout)
335 * We delay in small increments at first in an effort to maintain
336 * responsiveness for simple, fast executing commands but then back
337 * off to larger delays to a maximum retry delay.
339 static const int delay[] = {
340 1, 1, 3, 5, 10, 10, 20, 50, 100
344 int i, ms, delay_idx, ret, next_tx_check;
345 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
346 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
348 __be64 cmd_rpl[MBOX_LEN/8];
350 struct port_tx_state tx_state[MAX_NPORTS];
352 if (adap->flags & CHK_MBOX_ACCESS)
353 ASSERT_SYNCHRONIZED_OP(adap);
355 if (size <= 0 || (size & 15) || size > MBOX_LEN)
358 if (adap->flags & IS_VF) {
360 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
362 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
363 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
367 * If we have a negative timeout, that implies that we can't sleep.
375 * Attempt to gain access to the mailbox.
377 for (i = 0; i < 4; i++) {
378 ctl = t4_read_reg(adap, ctl_reg);
380 if (v != X_MBOWNER_NONE)
385 * If we were unable to gain access, report the error to our caller.
387 if (v != X_MBOWNER_PL) {
388 t4_report_fw_error(adap);
389 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
394 * If we gain ownership of the mailbox and there's a "valid" message
395 * in it, this is likely an asynchronous error message from the
396 * firmware. So we'll report that and then proceed on with attempting
397 * to issue our own command ... which may well fail if the error
398 * presaged the firmware crashing ...
400 if (ctl & F_MBMSGVALID) {
401 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
405 * Copy in the new mailbox command and send it on its way ...
407 memset(cmd_rpl, 0, sizeof(cmd_rpl));
408 memcpy(cmd_rpl, cmd, size);
409 CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
410 for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
411 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));
413 if (adap->flags & IS_VF) {
415 * For the VFs, the Mailbox Data "registers" are
416 * actually backed by T4's "MA" interface rather than
417 * PL Registers (as is the case for the PFs). Because
418 * these are in different coherency domains, the write
419 * to the VF's PL-register-backed Mailbox Control can
420 * race in front of the writes to the MA-backed VF
421 * Mailbox Data "registers". So we need to do a
422 * read-back on at least one byte of the VF Mailbox
423 * Data registers before doing the write to the VF
424 * Mailbox Control register.
426 t4_read_reg(adap, data_reg);
429 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
430 read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */
431 next_tx_check = 1000;
436 * Loop waiting for the reply; bail out if we time out or the firmware
440 for (i = 0; i < timeout; i += ms) {
441 if (!(adap->flags & IS_VF)) {
442 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
443 if (pcie_fw & F_PCIE_FW_ERR)
447 if (i >= next_tx_check) {
448 check_tx_state(adap, &tx_state[0]);
449 next_tx_check = i + 1000;
453 ms = delay[delay_idx]; /* last element may repeat */
454 if (delay_idx < ARRAY_SIZE(delay) - 1)
461 v = t4_read_reg(adap, ctl_reg);
462 if (v == X_CIM_PF_NOACCESS)
464 if (G_MBOWNER(v) == X_MBOWNER_PL) {
465 if (!(v & F_MBMSGVALID)) {
466 t4_write_reg(adap, ctl_reg,
467 V_MBOWNER(X_MBOWNER_NONE));
472 * Retrieve the command reply and release the mailbox.
474 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
475 CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
476 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
478 res = be64_to_cpu(cmd_rpl[0]);
479 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
480 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
481 res = V_FW_CMD_RETVAL(EIO);
483 memcpy(rpl, cmd_rpl, size);
484 return -G_FW_CMD_RETVAL((int)res);
489 * We timed out waiting for a reply to our mailbox command. Report
490 * the error and also check to see if the firmware reported any
493 CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
494 *(const u8 *)cmd, mbox, pcie_fw);
495 CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
496 CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
498 if (pcie_fw & F_PCIE_FW_ERR) {
500 t4_report_fw_error(adap);
503 t4_os_dump_devlog(adap);
506 t4_fatal_err(adap, true);
510 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
511 void *rpl, bool sleep_ok)
513 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
514 sleep_ok, FW_CMD_MAX_TIMEOUT);
518 static int t4_edc_err_read(struct adapter *adap, int idx)
520 u32 edc_ecc_err_addr_reg;
521 u32 edc_bist_status_rdata_reg;
524 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
527 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
528 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
532 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
533 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
536 "edc%d err addr 0x%x: 0x%x.\n",
537 idx, edc_ecc_err_addr_reg,
538 t4_read_reg(adap, edc_ecc_err_addr_reg));
540 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
541 edc_bist_status_rdata_reg,
542 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
543 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
544 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
545 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
546 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
547 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
548 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
549 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
550 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
556 * t4_mc_read - read from MC through backdoor accesses
558 * @idx: which MC to access
559 * @addr: address of first byte requested
560 * @data: 64 bytes of data containing the requested address
561 * @ecc: where to store the corresponding 64-bit ECC word
563 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
564 * that covers the requested address @addr. If @parity is not %NULL it
565 * is assigned the 64-bit ECC word for the read data.
567 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
570 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
571 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
574 mc_bist_cmd_reg = A_MC_BIST_CMD;
575 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
576 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
577 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
578 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
580 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
581 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
582 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
583 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
585 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
589 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
591 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
592 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
593 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
594 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
595 F_START_BIST | V_BIST_CMD_GAP(1));
596 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
600 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
602 for (i = 15; i >= 0; i--)
603 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
605 *ecc = t4_read_reg64(adap, MC_DATA(16));
611 * t4_edc_read - read from EDC through backdoor accesses
613 * @idx: which EDC to access
614 * @addr: address of first byte requested
615 * @data: 64 bytes of data containing the requested address
616 * @ecc: where to store the corresponding 64-bit ECC word
618 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
619 * that covers the requested address @addr. If @parity is not %NULL it
620 * is assigned the 64-bit ECC word for the read data.
622 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
625 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
626 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
629 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
630 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
631 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
632 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
634 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
638 * These macro are missing in t4_regs.h file.
639 * Added temporarily for testing.
641 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
642 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
643 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
644 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
645 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
646 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
648 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
654 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
656 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
657 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
658 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
659 t4_write_reg(adap, edc_bist_cmd_reg,
660 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
661 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
665 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
667 for (i = 15; i >= 0; i--)
668 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
670 *ecc = t4_read_reg64(adap, EDC_DATA(16));
676 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
678 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
679 * @addr: address within indicated memory type
680 * @len: amount of memory to read
681 * @buf: host memory buffer
683 * Reads an [almost] arbitrary memory region in the firmware: the
684 * firmware memory address, length and host buffer must be aligned on
685 * 32-bit boudaries. The memory is returned as a raw byte sequence from
686 * the firmware's memory. If this memory contains data structures which
687 * contain multi-byte integers, it's the callers responsibility to
688 * perform appropriate byte order conversions.
690 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
693 u32 pos, start, end, offset;
697 * Argument sanity checks ...
699 if ((addr & 0x3) || (len & 0x3))
703 * The underlaying EDC/MC read routines read 64 bytes at a time so we
704 * need to round down the start and round up the end. We'll start
705 * copying out of the first line at (addr - start) a word at a time.
707 start = rounddown2(addr, 64);
708 end = roundup2(addr + len, 64);
709 offset = (addr - start)/sizeof(__be32);
711 for (pos = start; pos < end; pos += 64, offset = 0) {
715 * Read the chip's memory block and bail if there's an error.
717 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
718 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
720 ret = t4_edc_read(adap, mtype, pos, data, NULL);
725 * Copy the data into the caller's memory buffer.
727 while (offset < 16 && len > 0) {
728 *buf++ = data[offset++];
729 len -= sizeof(__be32);
737 * Return the specified PCI-E Configuration Space register from our Physical
738 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
739 * since we prefer to let the firmware own all of these registers, but if that
740 * fails we go for it directly ourselves.
742 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
746 * If fw_attach != 0, construct and send the Firmware LDST Command to
747 * retrieve the specified PCI-E Configuration Space register.
749 if (drv_fw_attach != 0) {
750 struct fw_ldst_cmd ldst_cmd;
753 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
754 ldst_cmd.op_to_addrspace =
755 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
758 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
759 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
760 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
761 ldst_cmd.u.pcie.ctrl_to_fn =
762 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
763 ldst_cmd.u.pcie.r = reg;
766 * If the LDST Command succeeds, return the result, otherwise
767 * fall through to reading it directly ourselves ...
769 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
772 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
774 CH_WARN(adap, "Firmware failed to return "
775 "Configuration Space register %d, err = %d\n",
780 * Read the desired Configuration Space register via the PCI-E
781 * Backdoor mechanism.
783 return t4_hw_pci_read_cfg4(adap, reg);
787 * t4_get_regs_len - return the size of the chips register set
788 * @adapter: the adapter
790 * Returns the size of the chip's BAR0 register space.
792 unsigned int t4_get_regs_len(struct adapter *adapter)
794 unsigned int chip_version = chip_id(adapter);
796 switch (chip_version) {
798 if (adapter->flags & IS_VF)
799 return FW_T4VF_REGMAP_SIZE;
800 return T4_REGMAP_SIZE;
804 if (adapter->flags & IS_VF)
805 return FW_T4VF_REGMAP_SIZE;
806 return T5_REGMAP_SIZE;
810 "Unsupported chip version %d\n", chip_version);
/*
 * NOTE(review): this copy of t4_get_regs() appears truncated by the
 * extraction — the full t4/t5/t6 PF register-range tables (hundreds of
 * {first_reg, last_reg} pairs) and several statements/braces are missing,
 * and each line carries a numeric prefix that looks like an original-file
 * line number.  Do not treat this copy as compilable; it is preserved
 * verbatim below with review comments only.
 */
815 * t4_get_regs - read chip registers into provided buffer
817 * @buf: register buffer
818 * @buf_size: size (in bytes) of register buffer
820 * If the provided register buffer isn't large enough for the chip's
821 * full register range, the register dump will be truncated to the
822 * register buffer's size.
824 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
/* PF register-range table for T4 — contents elided in this extraction. */
826 static const unsigned int t4_reg_ranges[] = {
/* VF register ranges: each pair below is {first register, last register}. */
1285 static const unsigned int t4vf_reg_ranges[] = {
1286 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1287 VF_MPS_REG(A_MPS_VF_CTL),
1288 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1289 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1290 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1291 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1292 FW_T4VF_MBDATA_BASE_ADDR,
1293 FW_T4VF_MBDATA_BASE_ADDR +
1294 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
/* PF register-range table for T5 — contents elided in this extraction. */
1297 static const unsigned int t5_reg_ranges[] = {
2064 static const unsigned int t5vf_reg_ranges[] = {
2065 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2066 VF_MPS_REG(A_MPS_VF_CTL),
2067 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2068 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2069 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2070 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2071 FW_T4VF_MBDATA_BASE_ADDR,
2072 FW_T4VF_MBDATA_BASE_ADDR +
2073 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
/* PF register-range table for T6 — contents elided in this extraction. */
2076 static const unsigned int t6_reg_ranges[] = {
2637 static const unsigned int t6vf_reg_ranges[] = {
2638 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2639 VF_MPS_REG(A_MPS_VF_CTL),
2640 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2641 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2642 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2643 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2644 FW_T6VF_MBDATA_BASE_ADDR,
2645 FW_T6VF_MBDATA_BASE_ADDR +
2646 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2649 u32 *buf_end = (u32 *)(buf + buf_size);
2650 const unsigned int *reg_ranges;
2651 int reg_ranges_size, range;
2652 unsigned int chip_version = chip_id(adap);
2655 * Select the right set of register ranges to dump depending on the
2656 * adapter chip type.
2658 switch (chip_version) {
2660 if (adap->flags & IS_VF) {
2661 reg_ranges = t4vf_reg_ranges;
2662 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2664 reg_ranges = t4_reg_ranges;
2665 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2670 if (adap->flags & IS_VF) {
2671 reg_ranges = t5vf_reg_ranges;
2672 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2674 reg_ranges = t5_reg_ranges;
2675 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2680 if (adap->flags & IS_VF) {
2681 reg_ranges = t6vf_reg_ranges;
2682 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2684 reg_ranges = t6_reg_ranges;
2685 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2691 "Unsupported chip version %d\n", chip_version);
2696 * Clear the register buffer and insert the appropriate register
2697 * values selected by the above register ranges.
2699 memset(buf, 0, buf_size);
2700 for (range = 0; range < reg_ranges_size; range += 2) {
2701 unsigned int reg = reg_ranges[range];
2702 unsigned int last_reg = reg_ranges[range + 1];
2703 u32 *bufp = (u32 *)(buf + reg);
2706 * Iterate across the register range filling in the register
2707 * buffer but don't write past the end of the register buffer.
2709 while (reg <= last_reg && bufp < buf_end) {
2710 *bufp++ = t4_read_reg(adap, reg);
2717 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID
2718 * header followed by one or more VPD-R sections, each with its own header.
2726 struct t4_vpdr_hdr {
2732 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2734 #define EEPROM_DELAY 10 /* 10us per poll spin */
2735 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2737 #define EEPROM_STAT_ADDR 0x7bfc
2738 #define VPD_SIZE 0x800
2739 #define VPD_BASE 0x400
2740 #define VPD_BASE_OLD 0
2741 #define VPD_LEN 1024
2742 #define VPD_INFO_FLD_HDR_SIZE 3
2743 #define CHELSIO_VPD_UNIQUE_ID 0x82
2746 * Small utility function to wait till any outstanding VPD Access is complete.
2747 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2748 * VPD Access in flight. This allows us to handle the problem of having a
2749 * previous VPD Access time out and prevent an attempt to inject a new VPD
2750 * Request before any in-flight VPD reguest has completed.
2752 static int t4_seeprom_wait(struct adapter *adapter)
2754 unsigned int base = adapter->params.pci.vpd_cap_addr;
2758 * If no VPD Access is in flight, we can just return success right
2761 if (!adapter->vpd_busy)
2765 * Poll the VPD Capability Address/Flag register waiting for it
2766 * to indicate that the operation is complete.
2768 max_poll = EEPROM_MAX_POLL;
2772 udelay(EEPROM_DELAY);
2773 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2776 * If the operation is complete, mark the VPD as no longer
2777 * busy and return success.
2779 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2780 adapter->vpd_busy = 0;
2783 } while (--max_poll);
2786 * Failure! Note that we leave the VPD Busy status set in order to
2787 * avoid pushing a new VPD Access request into the VPD Capability till
2788 * the current operation eventually succeeds. It's a bug to issue a
2789 * new request when an existing request is in flight and will result
2790 * in corrupt hardware state.
2796 * t4_seeprom_read - read a serial EEPROM location
2797 * @adapter: adapter to read
2798 * @addr: EEPROM virtual address
2799 * @data: where to store the read data
2801 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2802 * VPD capability. Note that this function must be called with a virtual
2805 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2807 unsigned int base = adapter->params.pci.vpd_cap_addr;
2811 * VPD Accesses must alway be 4-byte aligned!
2813 if (addr >= EEPROMVSIZE || (addr & 3))
2817 * Wait for any previous operation which may still be in flight to
2820 ret = t4_seeprom_wait(adapter);
2822 CH_ERR(adapter, "VPD still busy from previous operation\n");
2827 * Issue our new VPD Read request, mark the VPD as being busy and wait
2828 * for our request to complete. If it doesn't complete, note the
2829 * error and return it to our caller. Note that we do not reset the
2832 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2833 adapter->vpd_busy = 1;
2834 adapter->vpd_flag = PCI_VPD_ADDR_F;
2835 ret = t4_seeprom_wait(adapter);
2837 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2842 * Grab the returned data, swizzle it into our endianness and
2845 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2846 *data = le32_to_cpu(*data);
2851 * t4_seeprom_write - write a serial EEPROM location
2852 * @adapter: adapter to write
2853 * @addr: virtual EEPROM address
2854 * @data: value to write
2856 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2857 * VPD capability. Note that this function must be called with a virtual
2860 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2862 unsigned int base = adapter->params.pci.vpd_cap_addr;
2868 * VPD Accesses must alway be 4-byte aligned!
2870 if (addr >= EEPROMVSIZE || (addr & 3))
2874 * Wait for any previous operation which may still be in flight to
2877 ret = t4_seeprom_wait(adapter);
2879 CH_ERR(adapter, "VPD still busy from previous operation\n");
2884 * Issue our new VPD Read request, mark the VPD as being busy and wait
2885 * for our request to complete. If it doesn't complete, note the
2886 * error and return it to our caller. Note that we do not reset the
2889 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2891 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2892 (u16)addr | PCI_VPD_ADDR_F);
2893 adapter->vpd_busy = 1;
2894 adapter->vpd_flag = 0;
2895 ret = t4_seeprom_wait(adapter);
2897 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2902 * Reset PCI_VPD_DATA register after a transaction and wait for our
2903 * request to complete. If it doesn't complete, return error.
2905 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2906 max_poll = EEPROM_MAX_POLL;
2908 udelay(EEPROM_DELAY);
2909 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2910 } while ((stats_reg & 0x1) && --max_poll);
2914 /* Return success! */
/* NOTE(review): listing is missing interleaved lines (e.g. the `fn *= sz`
 * scaling and the final error return implied by the mapping comment). */
2919 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2920 * @phys_addr: the physical EEPROM address
2921 * @fn: the PCI function number
2922 * @sz: size of function-specific area
2924 * Translate a physical EEPROM address to virtual. The first 1K is
2925 * accessed through virtual addresses starting at 31K, the rest is
2926 * accessed through virtual addresses starting at 0.
2928 * The mapping is as follows:
2929 * [0..1K) -> [31K..32K)
2930 * [1K..1K+A) -> [ES-A..ES)
2931 * [1K+A..ES) -> [0..ES-A-1K)
2933 * where A = @fn * @sz, and ES = EEPROM size.
2935 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2938 if (phys_addr < 1024)
2939 return phys_addr + (31 << 10)
2940 if (phys_addr < 1024 + fn)
2941 return EEPROMSIZE - fn + phys_addr - 1024;
2942 if (phys_addr < EEPROMSIZE)
2943 return phys_addr - 1024 - fn;
2948 * t4_seeprom_wp - enable/disable EEPROM write protection
2949 * @adapter: the adapter
2950 * @enable: whether to enable or disable write protection
2952 * Enables or disables write protection on the serial EEPROM.
2954 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* 0xc sets the EEPROM status-word protect bits; 0 clears them. */
2956 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/* NOTE(review): listing is missing interleaved lines (loop over @region,
 * error returns). Code tokens preserved verbatim. */
2960 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2961 * @vpd: Pointer to buffered vpd data structure
2962 * @kw: The keyword to search for
2963 * @region: VPD region to search (starting from 0)
2965 * Returns the value of the information field keyword or
2966 * -ENOENT otherwise.
2968 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
2971 unsigned int offset, len;
2972 const struct t4_vpdr_hdr *vpdr;
2974 offset = sizeof(struct t4_vpd_hdr);
2975 vpdr = (const void *)(vpd + offset);
2976 tag = vpdr->vpdr_tag;
/* VPD-R length is little-endian: low byte first, high byte shifted in. */
2977 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2979 offset += sizeof(struct t4_vpdr_hdr) + len;
2980 vpdr = (const void *)(vpd + offset);
2981 if (++tag != vpdr->vpdr_tag)
2983 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2985 offset += sizeof(struct t4_vpdr_hdr);
2987 if (offset + len > VPD_LEN) {
/* Scan info fields: 2-byte keyword + 1-byte length header per field. */
2991 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2992 if (memcmp(vpd + i , kw , 2) == 0){
2993 i += VPD_INFO_FLD_HDR_SIZE;
/* vpd[i+2] is the field's data length; skip header + data to next field. */
2997 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
/* NOTE(review): listing is missing interleaved lines (declarations, error
 * returns, strstrip of p->id/p->ec). Code tokens preserved verbatim. */
3005 * get_vpd_params - read VPD parameters from VPD EEPROM
3006 * @adapter: adapter to read
3007 * @p: where to store the parameters
3008 * @vpd: caller provided temporary space to read the VPD into
3010 * Reads card parameters stored in VPD EEPROM.
3012 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
3013 uint16_t device_id, u32 *buf)
3016 int ec, sn, pn, na, md;
3018 const u8 *vpd = (const u8 *)buf;
3021 * Card information normally starts at VPD_BASE but early cards had
3024 ret = t4_seeprom_read(adapter, VPD_BASE, buf);
3029 * The VPD shall have a unique identifier specified by the PCI SIG.
3030 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
3031 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
3032 * is expected to automatically put this entry at the
3033 * beginning of the VPD.
3035 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Read the whole VPD region one 32-bit word at a time. */
3037 for (i = 0; i < VPD_LEN; i += 4) {
3038 ret = t4_seeprom_read(adapter, addr + i, buf++);
3043 #define FIND_VPD_KW(var,name) do { \
3044 var = get_vpd_keyword_val(vpd, name, 0); \
3046 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
3051 FIND_VPD_KW(i, "RV");
/* Checksum runs from the start of the VPD up to and including "RV". */
3052 for (csum = 0; i >= 0; i--)
3057 "corrupted VPD EEPROM, actual csum %u\n", csum);
3061 FIND_VPD_KW(ec, "EC");
3062 FIND_VPD_KW(sn, "SN");
3063 FIND_VPD_KW(pn, "PN");
3064 FIND_VPD_KW(na, "NA");
3067 memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
3069 memcpy(p->ec, vpd + ec, EC_LEN);
/* Field length byte sits at offset 2 of the info-field header. */
3071 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3072 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3074 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3075 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3076 strstrip((char *)p->pn);
3077 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3078 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3079 strstrip((char *)p->na);
/* Custom cards (device id bit 7 set) carry no "VF" media keyword. */
3081 if (device_id & 0x80)
3082 return 0; /* Custom card */
3084 md = get_vpd_keyword_val(vpd, "VF", 1);
3086 snprintf(p->md, sizeof(p->md), "unknown");
3088 i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
3089 memcpy(p->md, vpd + md, min(i, MD_LEN));
3090 strstrip((char *)p->md);
3096 /* serial flash and firmware constants and flash config file constants */
3098 SF_ATTEMPTS = 10, /* max retries for SF operations */
3100 /* flash command opcodes */
/* Standard SPI NOR flash command set (JEDEC-common opcodes). */
3101 SF_PROG_PAGE = 2, /* program 256B page */
3102 SF_WR_DISABLE = 4, /* disable writes */
3103 SF_RD_STATUS = 5, /* read status register */
3104 SF_WR_ENABLE = 6, /* enable writes */
3105 SF_RD_DATA_FAST = 0xb, /* read flash */
3106 SF_RD_ID = 0x9f, /* read ID */
3107 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */
/* NOTE(review): listing is missing interleaved lines (braces, -EINVAL/-EBUSY
 * returns, final return). Code tokens preserved verbatim. */
3111 * sf1_read - read data from the serial flash
3112 * @adapter: the adapter
3113 * @byte_cnt: number of bytes to read
3114 * @cont: whether another operation will be chained
3115 * @lock: whether to lock SF for PL access only
3116 * @valp: where to store the read data
3118 * Reads up to 4 bytes of data from the serial flash. The location of
3119 * the read needs to be specified prior to calling this by issuing the
3120 * appropriate commands to the serial flash.
3122 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3123 int lock, u32 *valp)
3127 if (!byte_cnt || byte_cnt > 4)
3129 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* BYTECNT field is encoded as count-1 in the SF_OP register. */
3131 t4_write_reg(adapter, A_SF_OP,
3132 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3133 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3135 *valp = t4_read_reg(adapter, A_SF_DATA);
/* NOTE(review): listing is missing interleaved lines (parameter list tail,
 * error returns). Code tokens preserved verbatim. */
3140 * sf1_write - write data to the serial flash
3141 * @adapter: the adapter
3142 * @byte_cnt: number of bytes to write
3143 * @cont: whether another operation will be chained
3144 * @lock: whether to lock SF for PL access only
3145 * @val: value to write
3147 * Writes up to 4 bytes of data to the serial flash. The location of
3148 * the write needs to be specified prior to calling this by issuing the
3149 * appropriate commands to the serial flash.
3151 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3154 if (!byte_cnt || byte_cnt > 4)
3156 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3158 t4_write_reg(adapter, A_SF_DATA, val);
/* V_OP(1) distinguishes a write operation from sf1_read's setup. */
3159 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3160 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3161 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
/* NOTE(review): listing is missing interleaved lines (loop construct, the
 * busy-bit test on `status`, msleep of @delay, timeout return). */
3165 * flash_wait_op - wait for a flash operation to complete
3166 * @adapter: the adapter
3167 * @attempts: max number of polls of the status register
3168 * @delay: delay between polls in ms
3170 * Wait for a flash operation to complete by polling the status register.
3172 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RDSR (SF_RD_STATUS) then read back one status byte. */
3178 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3179 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3183 if (--attempts == 0)
/* NOTE(review): listing is missing interleaved lines (error unwind, loop
 * closing, final return). Code tokens preserved verbatim. */
3191 * t4_read_flash - read words from serial flash
3192 * @adapter: the adapter
3193 * @addr: the start address for the read
3194 * @nwords: how many 32-bit words to read
3195 * @data: where to store the read data
3196 * @byte_oriented: whether to store data as bytes or as words
3198 * Read the specified number of 32-bit words from the serial flash.
3199 * If @byte_oriented is set the read data is stored as a byte array
3200 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3201 * natural endianness.
3203 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3204 unsigned int nwords, u32 *data, int byte_oriented)
3208 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Byte-swap the address so it goes out MSB-first with the FAST_READ opcode. */
3211 addr = swab32(addr) | SF_RD_DATA_FAST;
3213 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3214 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3217 for ( ; nwords; nwords--, data++) {
/* Chain reads (cont) until the last word, which also unlocks the SF. */
3218 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3220 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3224 *data = (__force __u32)(cpu_to_be32(*data));
/* NOTE(review): listing is missing interleaved lines (c = min(left, 4),
 * unwind labels, returns). Code tokens preserved verbatim. */
3230 * t4_write_flash - write up to a page of data to the serial flash
3231 * @adapter: the adapter
3232 * @addr: the start address to write
3233 * @n: length of data to write in bytes
3234 * @data: the data to write
3235 * @byte_oriented: whether to store data as bytes or as words
3237 * Writes up to a page of data (256 bytes) to the serial flash starting
3238 * at the given address. All the data must be written to the same page.
3239 * If @byte_oriented is set the write data is stored as byte stream
3240 * (i.e. matches what on disk), otherwise in big-endian.
3242 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3243 unsigned int n, const u8 *data, int byte_oriented)
3246 u32 buf[SF_PAGE_SIZE / 4];
3247 unsigned int i, c, left, val, offset = addr & 0xff;
/* Reject writes that cross a 256-byte page or fall outside the flash. */
3249 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3252 val = swab32(addr) | SF_PROG_PAGE;
/* WREN must precede the page-program command. */
3254 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3255 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3258 for (left = n; left; left -= c) {
3260 for (val = 0, i = 0; i < c; ++i)
3261 val = (val << 8) + *data++;
3264 val = cpu_to_be32(val);
3266 ret = sf1_write(adapter, c, c != left, 1, val);
3270 ret = flash_wait_op(adapter, 8, 1);
3274 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3276 /* Read the page to verify the write succeeded */
3277 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data has advanced by n in the copy loop, so data - n is the original. */
3282 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3284 "failed to correctly write the flash page at %#x\n",
3291 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3296 * t4_get_fw_version - read the firmware version
3297 * @adapter: the adapter
3298 * @vers: where to place the version
3300 * Reads the FW version from flash.
3302 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* Read the single fw_ver word at its offset inside the flash fw_hdr. */
3304 return t4_read_flash(adapter, FLASH_FW_START +
3305 offsetof(struct fw_hdr, fw_ver), 1,
3310 * t4_get_fw_hdr - read the firmware header
3311 * @adapter: the adapter
3312 * @hdr: where to place the version
3314 * Reads the FW header from flash into caller provided buffer.
3316 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
/* byte_oriented=1: header is stored as an on-disk byte stream. */
3318 return t4_read_flash(adapter, FLASH_FW_START,
3319 sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
3323 * t4_get_bs_version - read the firmware bootstrap version
3324 * @adapter: the adapter
3325 * @vers: where to place the version
3327 * Reads the FW Bootstrap version from flash.
3329 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
/* Same fw_hdr layout as the main FW, at the bootstrap flash region. */
3331 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3332 offsetof(struct fw_hdr, fw_ver), 1,
3337 * t4_get_tp_version - read the TP microcode version
3338 * @adapter: the adapter
3339 * @vers: where to place the version
3341 * Reads the TP microcode version from flash.
3343 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* TP microcode version lives in the same flash fw_hdr as the FW version. */
3345 return t4_read_flash(adapter, FLASH_FW_START +
3346 offsetof(struct fw_hdr, tp_microcode_ver),
/* NOTE(review): listing is missing interleaved lines (declarations, the
 * -ENOENT and success returns). Code tokens preserved verbatim. */
3351 * t4_get_exprom_version - return the Expansion ROM version (if any)
3352 * @adapter: the adapter
3353 * @vers: where to place the version
3355 * Reads the Expansion ROM header from FLASH and returns the version
3356 * number (if present) through the @vers return value pointer. We return
3357 * this in the Firmware Version Format since it's convenient. Return
3358 * 0 on success, -ENOENT if no Expansion ROM is present.
3360 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3362 struct exprom_header {
3363 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3364 unsigned char hdr_ver[4]; /* Expansion ROM version */
3366 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3370 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3371 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3376 hdr = (struct exprom_header *)exprom_header_buf;
/* PCI expansion ROM signature: 0x55, 0xaa in the first two bytes. */
3377 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3380 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3381 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3382 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3383 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
/* NOTE(review): listing is missing interleaved lines (the zero-out of *vers
 * for old firmware and the return). Code tokens preserved verbatim. */
3388 * t4_get_scfg_version - return the Serial Configuration version
3389 * @adapter: the adapter
3390 * @vers: where to place the version
3392 * Reads the Serial Configuration Version via the Firmware interface
3393 * (thus this can only be called once we're ready to issue Firmware
3394 * commands). The format of the Serial Configuration version is
3395 * adapter specific. Returns 0 on success, an error on failure.
3397 * Note that early versions of the Firmware didn't include the ability
3398 * to retrieve the Serial Configuration version, so we zero-out the
3399 * return-value parameter in that case to avoid leaving it with
3402 * Also note that the Firmware will return its cached copy of the Serial
3403 * Initialization Revision ID, not the actual Revision ID as written in
3404 * the Serial EEPROM. This is only an issue if a new VPD has been written
3405 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3406 * it's best to defer calling this routine till after a FW_RESET_CMD has
3407 * been issued if the Host Driver will be performing a full adapter
3410 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3415 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3416 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3417 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3418 1, &scfgrev_param, vers);
/* NOTE(review): listing is missing interleaved lines (the zero-out of *vers
 * for old firmware and the return). Code tokens preserved verbatim. */
3425 * t4_get_vpd_version - return the VPD version
3426 * @adapter: the adapter
3427 * @vers: where to place the version
3429 * Reads the VPD via the Firmware interface (thus this can only be called
3430 * once we're ready to issue Firmware commands). The format of the
3431 * VPD version is adapter specific. Returns 0 on success, an error on
3434 * Note that early versions of the Firmware didn't include the ability
3435 * to retrieve the VPD version, so we zero-out the return-value parameter
3436 * in that case to avoid leaving it with garbage in it.
3438 * Also note that the Firmware will return its cached copy of the VPD
3439 * Revision ID, not the actual Revision ID as written in the Serial
3440 * EEPROM. This is only an issue if a new VPD has been written and the
3441 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3442 * to defer calling this routine till after a FW_RESET_CMD has been issued
3443 * if the Host Driver will be performing a full adapter initialization.
3445 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3450 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3451 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3452 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3453 1, &vpdrev_param, vers);
3460 * t4_get_version_info - extract various chip/firmware version information
3461 * @adapter: the adapter
3463 * Reads various chip/firmware version numbers and stores them into the
3464 * adapter Adapter Parameters structure. If any of the efforts fails
3465 * the first failure will be returned, but all of the version numbers
3468 int t4_get_version_info(struct adapter *adapter)
/* Remember only the FIRST failure; keep reading the remaining versions. */
3472 #define FIRST_RET(__getvinfo) \
3474 int __ret = __getvinfo; \
3475 if (__ret && !ret) \
3479 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3480 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3481 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3482 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3483 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3484 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3492 * t4_flash_erase_sectors - erase a range of flash sectors
3493 * @adapter: the adapter
3494 * @start: the first sector to erase
3495 * @end: the last sector to erase
3497 * Erases the sectors in the given inclusive range.
3499 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3503 if (end >= adapter->params.sf_nsec)
3506 while (start <= end) {
/* WREN, then sector-erase with the sector number as the address byte. */
3507 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3508 (ret = sf1_write(adapter, 4, 0, 1,
3509 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3510 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3512 "erase of flash sector %d failed, error %d\n",
3518 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3523 * t4_flash_cfg_addr - return the address of the flash configuration file
3524 * @adapter: the adapter
3526 * Return the address within the flash where the Firmware Configuration
3527 * File is stored, or an error if the device FLASH is too small to contain
3528 * a Firmware Configuration File.
3530 int t4_flash_cfg_addr(struct adapter *adapter)
3533 * If the device FLASH isn't large enough to hold a Firmware
3534 * Configuration File, return an error.
3536 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3539 return FLASH_CFG_START;
3543 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3544 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3545 * and emit an error message for mismatched firmware to save our caller the
3548 static int t4_fw_matches_chip(struct adapter *adap,
3549 const struct fw_hdr *hdr)
3552 * The expression below will return FALSE for any unsupported adapter
3553 * which will keep us "honest" in the future ...
/* Positive match per supported generation; anything else falls through
 * to the error message below. */
3555 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3556 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3557 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3561 "FW image (%d) is not suitable for this adapter (%d)\n",
3562 hdr->chip, chip_id(adap));
/* NOTE(review): listing is missing interleaved lines (size checks' returns,
 * `addr = fw_start`, unwind label, final return). Tokens preserved verbatim. */
3567 * t4_load_fw - download firmware
3568 * @adap: the adapter
3569 * @fw_data: the firmware image to write
3572 * Write the supplied firmware image to the card's serial flash.
3574 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3579 u8 first_page[SF_PAGE_SIZE];
3580 const u32 *p = (const u32 *)fw_data;
3581 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3582 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3583 unsigned int fw_start_sec;
3584 unsigned int fw_start;
3585 unsigned int fw_size;
/* Bootstrap images go to their own flash region; regular FW otherwise. */
3587 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3588 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3589 fw_start = FLASH_FWBOOTSTRAP_START;
3590 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3592 fw_start_sec = FLASH_FW_START_SEC;
3593 fw_start = FLASH_FW_START;
3594 fw_size = FLASH_FW_MAX_SIZE;
3598 CH_ERR(adap, "FW image has no data\n");
3603 "FW image size not multiple of 512 bytes\n");
3606 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3608 "FW image size differs from size in FW header\n");
3611 if (size > fw_size) {
3612 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3616 if (!t4_fw_matches_chip(adap, hdr))
/* Whole-image checksum must sum to 0xffffffff. */
3619 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3620 csum += be32_to_cpu(p[i]);
3622 if (csum != 0xffffffff) {
3624 "corrupted firmware image, checksum %#x\n", csum);
3628 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3629 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3634 * We write the correct version at the end so the driver can see a bad
3635 * version if the FW write fails. Start by writing a copy of the
3636 * first page with a bad version.
3638 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3639 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3640 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3645 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3646 addr += SF_PAGE_SIZE;
3647 fw_data += SF_PAGE_SIZE;
3648 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* Finally write the real fw_ver word, marking the image valid. */
3653 ret = t4_write_flash(adap,
3654 fw_start + offsetof(struct fw_hdr, fw_ver),
3655 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3658 CH_ERR(adap, "firmware download failed, error %d\n",
3664 * t4_fwcache - firmware cache operation
3665 * @adap: the adapter
3666 * @op : the operation (flush or flush and invalidate)
3668 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3670 struct fw_params_cmd c;
3672 memset(&c, 0, sizeof(c));
3674 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3675 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3676 V_FW_PARAMS_CMD_PFN(adap->pf) |
3677 V_FW_PARAMS_CMD_VFN(0));
3678 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3680 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3681 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
/* op is passed through without byte-swapping; __force silences sparse. */
3682 c.param[0].val = (__force __be32)op;
3684 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/* NOTE(review): dumps the CIM PIF logic-analyzer buffers. Listing is
 * missing interleaved lines (braces, pointer-NULL checks). */
3687 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3688 unsigned int *pif_req_wrptr,
3689 unsigned int *pif_rsp_wrptr)
3692 u32 cfg, val, req, rsp;
/* Temporarily stop LA capture while reading, restore cfg at the end. */
3694 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3695 if (cfg & F_LADBGEN)
3696 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3698 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3699 req = G_POLADBGWRPTR(val);
3700 rsp = G_PILADBGWRPTR(val);
3702 *pif_req_wrptr = req;
3704 *pif_rsp_wrptr = rsp;
3706 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3707 for (j = 0; j < 6; j++) {
3708 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3709 V_PILADBGRDPTR(rsp));
3710 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3711 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3715 req = (req + 2) & M_POLADBGRDPTR;
3716 rsp = (rsp + 2) & M_PILADBGRDPTR;
3718 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/* NOTE(review): dumps the CIM MA logic-analyzer buffers; listing is missing
 * interleaved lines (declarations, idx computation, closing braces). */
3721 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
/* Disable LA capture while reading; original cfg restored below. */
3726 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3727 if (cfg & F_LADBGEN)
3728 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3730 for (i = 0; i < CIM_MALA_SIZE; i++) {
3731 for (j = 0; j < 5; j++) {
3733 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3734 V_PILADBGRDPTR(idx));
3735 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3736 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3739 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/* Dump the ULP-RX logic analyzer: 8 interleaved columns of ULPRX_LA_SIZE
 * entries each, stored row-major into la_buf (stride 8 per column). */
3742 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3746 for (i = 0; i < 8; i++) {
3747 u32 *p = la_buf + i;
3749 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
/* Start reading from the current write pointer (oldest entry). */
3750 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3751 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3752 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3753 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3758 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3759 * @caps16: a 16-bit Port Capabilities value
3761 * Returns the equivalent 32-bit Port Capabilities value.
3763 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
3765 uint32_t caps32 = 0;
/* Map each FW_PORT_CAP_* bit to its FW_PORT_CAP32_* counterpart. */
3767 #define CAP16_TO_CAP32(__cap) \
3769 if (caps16 & FW_PORT_CAP_##__cap) \
3770 caps32 |= FW_PORT_CAP32_##__cap; \
3773 CAP16_TO_CAP32(SPEED_100M);
3774 CAP16_TO_CAP32(SPEED_1G);
3775 CAP16_TO_CAP32(SPEED_25G);
3776 CAP16_TO_CAP32(SPEED_10G);
3777 CAP16_TO_CAP32(SPEED_40G);
3778 CAP16_TO_CAP32(SPEED_100G);
3779 CAP16_TO_CAP32(FC_RX);
3780 CAP16_TO_CAP32(FC_TX);
3781 CAP16_TO_CAP32(ANEG);
3782 CAP16_TO_CAP32(FORCE_PAUSE);
3783 CAP16_TO_CAP32(MDIAUTO);
3784 CAP16_TO_CAP32(MDISTRAIGHT);
3785 CAP16_TO_CAP32(FEC_RS);
3786 CAP16_TO_CAP32(FEC_BASER_RS);
3787 CAP16_TO_CAP32(802_3_PAUSE);
3788 CAP16_TO_CAP32(802_3_ASM_DIR);
3790 #undef CAP16_TO_CAP32
3796 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3797 * @caps32: a 32-bit Port Capabilities value
3799 * Returns the equivalent 16-bit Port Capabilities value. Note that
3800 * not all 32-bit Port Capabilities can be represented in the 16-bit
3801 * Port Capabilities and some fields/values may not make it.
3803 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
3805 uint16_t caps16 = 0;
/* Inverse of fwcaps16_to_caps32; 32-bit-only capabilities are dropped. */
3807 #define CAP32_TO_CAP16(__cap) \
3809 if (caps32 & FW_PORT_CAP32_##__cap) \
3810 caps16 |= FW_PORT_CAP_##__cap; \
3813 CAP32_TO_CAP16(SPEED_100M);
3814 CAP32_TO_CAP16(SPEED_1G);
3815 CAP32_TO_CAP16(SPEED_10G);
3816 CAP32_TO_CAP16(SPEED_25G);
3817 CAP32_TO_CAP16(SPEED_40G);
3818 CAP32_TO_CAP16(SPEED_100G);
3819 CAP32_TO_CAP16(FC_RX);
3820 CAP32_TO_CAP16(FC_TX);
3821 CAP32_TO_CAP16(802_3_PAUSE);
3822 CAP32_TO_CAP16(802_3_ASM_DIR);
3823 CAP32_TO_CAP16(ANEG);
3824 CAP32_TO_CAP16(FORCE_PAUSE);
3825 CAP32_TO_CAP16(MDIAUTO);
3826 CAP32_TO_CAP16(MDISTRAIGHT);
3827 CAP32_TO_CAP16(FEC_RS);
3828 CAP32_TO_CAP16(FEC_BASER_RS);
3830 #undef CAP32_TO_CAP16
/* True if the port is a copper/BASE-T type (any of the three BT variants). */
3836 is_bt(struct port_info *pi)
3839 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
3840 pi->port_type == FW_PORT_TYPE_BT_XFI ||
3841 pi->port_type == FW_PORT_TYPE_BT_XAUI);
/* NOTE(review): listing is missing interleaved lines (fc/fec initializers,
 * aneg=0 branch, error return for empty rcap). Tokens preserved verbatim. */
3845 * t4_link_l1cfg - apply link configuration to MAC/PHY
3846 * @phy: the PHY to setup
3847 * @mac: the MAC to setup
3848 * @lc: the requested link configuration
3850 * Set up a port's MAC and PHY according to a desired link configuration.
3851 * - If the PHY can auto-negotiate first decide what to advertise, then
3852 * enable/disable auto-negotiation as desired, and reset.
3853 * - If the PHY does not auto-negotiate just reset it.
3854 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3855 * otherwise do it later based on the outcome of auto-negotiation.
3857 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3858 struct link_config *lc)
3860 struct fw_port_cmd c;
3861 unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
3862 unsigned int aneg, fc, fec, speed, rcap;
/* Build the pause-frame capability bits from the requested flow control. */
3865 if (lc->requested_fc & PAUSE_RX)
3866 fc |= FW_PORT_CAP32_FC_RX;
3867 if (lc->requested_fc & PAUSE_TX)
3868 fc |= FW_PORT_CAP32_FC_TX;
3869 if (!(lc->requested_fc & PAUSE_AUTONEG))
3870 fc |= FW_PORT_CAP32_FORCE_PAUSE;
3873 if (lc->requested_fec == FEC_AUTO)
3876 if (lc->requested_fec & FEC_RS)
3877 fec |= FW_PORT_CAP32_FEC_RS;
3878 if (lc->requested_fec & FEC_BASER_RS)
3879 fec |= FW_PORT_CAP32_FEC_BASER_RS;
3882 if (lc->requested_aneg == AUTONEG_DISABLE)
3884 else if (lc->requested_aneg == AUTONEG_ENABLE)
3885 aneg = FW_PORT_CAP32_ANEG;
/* AUTONEG_AUTO: advertise aneg only if the port supports it. */
3887 aneg = lc->supported & FW_PORT_CAP32_ANEG;
3890 speed = lc->supported & V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
3891 } else if (lc->requested_speed != 0)
3892 speed = speed_to_fwcap(lc->requested_speed);
3894 speed = fwcap_top_speed(lc->supported);
3896 /* Force AN on for BT cards. */
3897 if (is_bt(adap->port[port]))
3898 aneg = lc->supported & FW_PORT_CAP32_ANEG;
3900 rcap = aneg | speed | fc | fec;
/* Clamp the request to the port's supported capabilities. */
3901 if ((rcap | lc->supported) != lc->supported) {
3903 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3906 rcap &= lc->supported;
3910 memset(&c, 0, sizeof(c));
3911 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3912 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3913 V_FW_PORT_CMD_PORTID(port));
/* Use the 32-bit L1_CFG32 command when the FW negotiated 32-bit caps. */
3914 if (adap->params.port_caps32) {
3916 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
3918 c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
3921 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3923 c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
3926 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3930 * t4_restart_aneg - restart autonegotiation
3931 * @adap: the adapter
3932 * @mbox: mbox to use for the FW command
3933 * @port: the port id
3935 * Restarts autonegotiation for the selected port.
3937 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3939 struct fw_port_cmd c;
3941 memset(&c, 0, sizeof(c));
3942 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3943 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3944 V_FW_PORT_CMD_PORTID(port));
3946 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* Advertise only ANEG, which tells the FW to restart negotiation. */
3948 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3949 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Descriptor tables used by the generic interrupt decoder below:
 * intr_details names individual cause bits, intr_action binds cause bits
 * to handlers, intr_info ties a cause/enable register pair together.
 * NOTE(review): struct member lines are partially missing in this listing. */
3952 struct intr_details {
3957 struct intr_action {
3960 bool (*action)(struct adapter *, int, bool);
3964 const char *name; /* name of the INT_CAUSE register */
3965 int cause_reg; /* INT_CAUSE register */
3966 int enable_reg; /* INT_ENABLE register */
3967 u32 fatal; /* bits that are fatal */
3968 const struct intr_details *details;
3969 const struct intr_action *actions;
/* Returns a one-character severity marker for a cause/enable/fatal triple
 * (body not visible in this listing). */
3973 intr_alert_char(u32 cause, u32 enable, u32 fatal)
/* Log a decoded view of an INT_CAUSE value: one summary line for the
 * register, then one line per known cause bit, then any leftover bits. */
3984 t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
3986 u32 enable, leftover;
3987 const struct intr_details *details;
3990 enable = t4_read_reg(adap, ii->enable_reg);
3991 alert = intr_alert_char(cause, enable, ii->fatal);
3992 CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
3993 alert, ii->name, ii->cause_reg, cause, enable, ii->fatal);
3996 for (details = ii->details; details && details->mask != 0; details++) {
3997 u32 msgbits = details->mask & cause;
4000 alert = intr_alert_char(msgbits, enable, ii->fatal);
4001 CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
/* Track bits not covered by any details entry. */
4003 leftover &= ~msgbits;
4005 if (leftover != 0 && leftover != cause)
4006 CH_ALERT(adap, " ? [0x%08x]\n", leftover);
/* NOTE(review): listing is missing interleaved lines (the `return fatal`
 * and closing brace implied by the header comment). */
4010 * Returns true for fatal error.
4013 t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
4014 u32 additional_cause, bool verbose)
4018 const struct intr_action *action;
4020 /* read and display cause. */
4021 cause = t4_read_reg(adap, ii->cause_reg);
4022 if (verbose || cause != 0)
4023 t4_show_intr_info(adap, ii, cause);
4024 fatal = (cause & ii->fatal) != 0;
/* Fold in caller-supplied bits so their actions run too. */
4025 cause |= additional_cause;
4029 for (action = ii->actions; action && action->mask != 0; action++) {
4030 if (!(action->mask & cause))
4032 fatal |= (action->action)(adap, action->arg, verbose);
/* Write-1-to-clear the cause register; read back to flush the write. */
4036 t4_write_reg(adap, ii->cause_reg, cause);
4037 (void)t4_read_reg(adap, ii->cause_reg);
4043 * Interrupt handler for the PCIE module.
4045 static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
4047 static const struct intr_details sysbus_intr_details[] = {
4048 { F_RNPP, "RXNP array parity error" },
4049 { F_RPCP, "RXPC array parity error" },
4050 { F_RCIP, "RXCIF array parity error" },
4051 { F_RCCP, "Rx completions control array parity error" },
4052 { F_RFTP, "RXFT array parity error" },
4055 static const struct intr_info sysbus_intr_info = {
4056 .name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
4057 .cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4058 .enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
4059 .fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
4060 .details = sysbus_intr_details,
4063 static const struct intr_details pcie_port_intr_details[] = {
4064 { F_TPCP, "TXPC array parity error" },
4065 { F_TNPP, "TXNP array parity error" },
4066 { F_TFTP, "TXFT array parity error" },
4067 { F_TCAP, "TXCA array parity error" },
4068 { F_TCIP, "TXCIF array parity error" },
4069 { F_RCAP, "RXCA array parity error" },
4070 { F_OTDD, "outbound request TLP discarded" },
4071 { F_RDPE, "Rx data parity error" },
4072 { F_TDUE, "Tx uncorrectable data error" },
4075 static const struct intr_info pcie_port_intr_info = {
4076 .name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
4077 .cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4078 .enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
4079 .fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
4080 F_OTDD | F_RDPE | F_TDUE,
4081 .details = pcie_port_intr_details,
/*
 * Decode table for PCIE_INT_CAUSE on T4 chips (T5/T6 use
 * t5_pcie_intr_details instead; selection happens at runtime).
 */
4084 static const struct intr_details pcie_intr_details[] = {
4085 { F_MSIADDRLPERR, "MSI AddrL parity error" },
4086 { F_MSIADDRHPERR, "MSI AddrH parity error" },
4087 { F_MSIDATAPERR, "MSI data parity error" },
4088 { F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
4089 { F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
4090 { F_MSIXDATAPERR, "MSI-X data parity error" },
4091 { F_MSIXDIPERR, "MSI-X DI parity error" },
4092 { F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
4093 { F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
4094 { F_TARTAGPERR, "PCIe target tag FIFO parity error" },
4095 { F_CCNTPERR, "PCIe CMD channel count parity error" },
4096 { F_CREQPERR, "PCIe CMD channel request parity error" },
4097 { F_CRSPPERR, "PCIe CMD channel response parity error" },
4098 { F_DCNTPERR, "PCIe DMA channel count parity error" },
4099 { F_DREQPERR, "PCIe DMA channel request parity error" },
4100 { F_DRSPPERR, "PCIe DMA channel response parity error" },
4101 { F_HCNTPERR, "PCIe HMA channel count parity error" },
4102 { F_HREQPERR, "PCIe HMA channel request parity error" },
4103 { F_HRSPPERR, "PCIe HMA channel response parity error" },
4104 { F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
4105 { F_FIDPERR, "PCIe FID parity error" },
4106 { F_INTXCLRPERR, "PCIe INTx clear parity error" },
4107 { F_MATAGPERR, "PCIe MA tag parity error" },
4108 { F_PIOTAGPERR, "PCIe PIO tag parity error" },
4109 { F_RXCPLPERR, "PCIe Rx completion parity error" },
4110 { F_RXWRPERR, "PCIe Rx write parity error" },
4111 { F_RPLPERR, "PCIe replay buffer parity error" },
4112 { F_PCIESINT, "PCIe core secondary fault" },
4113 { F_PCIEPINT, "PCIe core primary fault" },
4114 { F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
/*
 * Decode table for PCIE_INT_CAUSE on T5 and later chips.
 */
4117 static const struct intr_details t5_pcie_intr_details[] = {
4118 { F_IPGRPPERR, "Parity errors observed by IP" },
4119 { F_NONFATALERR, "PCIe non-fatal error" },
4120 { F_READRSPERR, "Outbound read error" },
4121 { F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
4122 { F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
4123 { F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
4124 { F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
4125 { F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
4126 { F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
4127 { F_MAGRPPERR, "MA group FIFO parity error" },
4128 { F_VFIDPERR, "VFID SRAM parity error" },
4129 { F_FIDPERR, "FID SRAM parity error" },
4130 { F_CFGSNPPERR, "config snoop FIFO parity error" },
4131 { F_HRSPPERR, "HMA channel response data SRAM parity error" },
4132 { F_HREQRDPERR, "HMA channel read request SRAM parity error" },
4133 { F_HREQWRPERR, "HMA channel write request SRAM parity error" },
4134 { F_DRSPPERR, "DMA channel response data SRAM parity error" },
/* Fixed description: DREQRDPERR is the DMA *read* request SRAM (was
 * mislabeled "write", a copy/paste of the HREQWRPERR text above). */
4135 { F_DREQRDPERR, "DMA channel read request SRAM parity error" },
4136 { F_CRSPPERR, "CMD channel response data SRAM parity error" },
4137 { F_CREQRDPERR, "CMD channel read request SRAM parity error" },
4138 { F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
4139 { F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
4140 { F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
4141 { F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
4142 { F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
4143 { F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
4144 { F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
4145 { F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
4146 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
4147 { F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
4148 { F_MSTGRPPERR, "Master response read queue SRAM parity error" },
/*
 * PCIE_INT_CAUSE dispatch info.  Deliberately non-const: .fatal and
 * .details are filled in at runtime depending on the chip generation
 * before being passed to t4_handle_intr().
 */
4151 struct intr_info pcie_intr_info = {
4152 .name = "PCIE_INT_CAUSE",
4153 .cause_reg = A_PCIE_INT_CAUSE,
4154 .enable_reg = A_PCIE_INT_ENABLE,
4162 fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
4163 fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
4165 pcie_intr_info.fatal = 0x3fffffc0;
4166 pcie_intr_info.details = pcie_intr_details;
4168 pcie_intr_info.fatal = is_t5(adap) ? 0xbfffff40 : 0x9fffff40;
4169 pcie_intr_info.details = t5_pcie_intr_details;
4171 fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
4177 * TP interrupt handler.
/*
 * Service TP_INT_CAUSE.  @arg is unused; @verbose is forwarded to
 * t4_handle_intr().  Returns true if a fatal condition was seen.
 */
4179 static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
4181 static const struct intr_details tp_intr_details[] = {
4182 { 0x3fffffff, "TP parity error" },
4183 { F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
4186 static const struct intr_info tp_intr_info = {
4187 .name = "TP_INT_CAUSE",
4188 .cause_reg = A_TP_INT_CAUSE,
4189 .enable_reg = A_TP_INT_ENABLE,
/* All bits except the topmost are considered fatal. */
4190 .fatal = 0x7fffffff,
4191 .details = tp_intr_details,
4195 return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
4199 * SGE interrupt handler.
/*
 * Service all SGE interrupt cause registers (CAUSE1..CAUSE6, the later
 * ones only on chips that have them) and report any captured error QID
 * from SGE_ERROR_STATS.  Returns true if any fatal condition was seen.
 */
4201 static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
4203 static const struct intr_info sge_int1_info = {
4204 .name = "SGE_INT_CAUSE1",
4205 .cause_reg = A_SGE_INT_CAUSE1,
4206 .enable_reg = A_SGE_INT_ENABLE1,
4207 .fatal = 0xffffffff,
4211 static const struct intr_info sge_int2_info = {
4212 .name = "SGE_INT_CAUSE2",
4213 .cause_reg = A_SGE_INT_CAUSE2,
4214 .enable_reg = A_SGE_INT_ENABLE2,
4215 .fatal = 0xffffffff,
/* CAUSE3 decode table for T4/T5. */
4219 static const struct intr_details sge_int3_details[] = {
4221 "DBP pointer delivery for invalid context or QID" },
4222 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4223 "Invalid QID or header request by IDMA" },
4224 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4225 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4226 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4227 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4228 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4229 { F_ERR_TIMER_ABOVE_MAX_QID,
4230 "SGE GTS with timer 0-5 for IQID > 1023" },
4231 { F_ERR_CPL_EXCEED_IQE_SIZE,
4232 "SGE received CPL exceeding IQE size" },
4233 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4234 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4235 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4236 { F_ERR_DROPPED_DB, "SGE DB dropped" },
4237 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4238 "SGE IQID > 1023 received CPL for FL" },
4239 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4240 F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4241 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4242 { F_ERR_ING_CTXT_PRIO,
4243 "Ingress context manager priority user error" },
4244 { F_ERR_EGR_CTXT_PRIO,
4245 "Egress context manager priority user error" },
4246 { F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
4247 { F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
4248 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4249 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4250 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4251 { 0x0000000f, "SGE context access for invalid queue" },
/* CAUSE3 decode table for T6+ (adds DBP tbuf/WRE entries, drops the
 * DB FIFO threshold entries). */
4254 static const struct intr_details t6_sge_int3_details[] = {
4256 "DBP pointer delivery for invalid context or QID" },
4257 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4258 "Invalid QID or header request by IDMA" },
4259 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4260 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4261 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4262 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4263 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4264 { F_ERR_TIMER_ABOVE_MAX_QID,
4265 "SGE GTS with timer 0-5 for IQID > 1023" },
4266 { F_ERR_CPL_EXCEED_IQE_SIZE,
4267 "SGE received CPL exceeding IQE size" },
4268 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4269 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4270 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4271 { F_ERR_DROPPED_DB, "SGE DB dropped" },
4272 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4273 "SGE IQID > 1023 received CPL for FL" },
4274 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4275 F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4276 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4277 { F_ERR_ING_CTXT_PRIO,
4278 "Ingress context manager priority user error" },
4279 { F_ERR_EGR_CTXT_PRIO,
4280 "Egress context manager priority user error" },
4281 { F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
4283 "SGE WRE packet less than advertized length" },
4284 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4285 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4286 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4287 { 0x0000000f, "SGE context access for invalid queue" },
/* Non-const: .details selected per chip generation below. */
4290 struct intr_info sge_int3_info = {
4291 .name = "SGE_INT_CAUSE3",
4292 .cause_reg = A_SGE_INT_CAUSE3,
4293 .enable_reg = A_SGE_INT_ENABLE3,
4294 .fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
4298 static const struct intr_info sge_int4_info = {
4299 .name = "SGE_INT_CAUSE4",
4300 .cause_reg = A_SGE_INT_CAUSE4,
4301 .enable_reg = A_SGE_INT_ENABLE4,
4306 static const struct intr_info sge_int5_info = {
4307 .name = "SGE_INT_CAUSE5",
4308 .cause_reg = A_SGE_INT_CAUSE5,
4309 .enable_reg = A_SGE_INT_ENABLE5,
4310 .fatal = 0xffffffff,
4314 static const struct intr_info sge_int6_info = {
4315 .name = "SGE_INT_CAUSE6",
4316 .cause_reg = A_SGE_INT_CAUSE6,
4317 .enable_reg = A_SGE_INT_ENABLE6,
/* Pick the CAUSE3 decode table that matches this chip. */
4326 if (chip_id(adap) <= CHELSIO_T5) {
4327 sge_int3_info.details = sge_int3_details;
4329 sge_int3_info.details = t6_sge_int3_details;
4333 fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
4334 fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
4335 fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
4336 fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
/* CAUSE5 exists from T5, CAUSE6 from T6 onward. */
4337 if (chip_id(adap) >= CHELSIO_T5)
4338 fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
4339 if (chip_id(adap) >= CHELSIO_T6)
4340 fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
/* Report and clear any error QID captured in SGE_ERROR_STATS. */
4342 v = t4_read_reg(adap, A_SGE_ERROR_STATS);
4343 if (v & F_ERROR_QID_VALID) {
4344 CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
4345 if (v & F_UNCAPTURED_ERROR)
4346 CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
4347 t4_write_reg(adap, A_SGE_ERROR_STATS,
4348 F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
4355 * CIM interrupt handler.
/*
 * Service the CIM interrupt cause registers (host, uP access, and the
 * per-PF cause).  Also detects a firmware-forced Timer0 interrupt used
 * to signal a firmware crash.  Returns true on any fatal condition.
 */
4357 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
/* Dump the CIM logic analyzer when Timer0 fires. */
4359 static const struct intr_action cim_host_intr_actions[] = {
4360 { F_TIMER0INT, 0, t4_os_dump_cimla },
4363 static const struct intr_details cim_host_intr_details[] = {
4365 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
4368 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
4369 { F_PLCIM_MSTRSPDATAPARERR,
4370 "PL2CIM master response data parity error" },
4371 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
4372 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
4373 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
4374 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
4375 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
4376 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
4379 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
4380 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
4381 { F_MBHOSTPARERR, "CIM mailbox host read parity error" },
4382 { F_MBUPPARERR, "CIM mailbox uP parity error" },
4383 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
4384 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
4385 { F_IBQULPPARERR, "CIM IBQ ULP parity error" },
4386 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
4387 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */
4388 "CIM IBQ PCIe/SGE_HI parity error" },
4389 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
4390 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
4391 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
4392 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
4393 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
4394 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
4395 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
/* Fixed description: this is the TIMER1 interrupt (the previous text
 * duplicated the TIMER0 entry below). */
4396 { F_TIMER1INT, "CIM TIMER1 interrupt" },
4397 { F_TIMER0INT, "CIM TIMER0 interrupt" },
4398 { F_PREFDROPINT, "CIM control register prefetch drop" },
/* Non-const: .fatal is chosen per chip generation below. */
4401 struct intr_info cim_host_intr_info = {
4402 .name = "CIM_HOST_INT_CAUSE",
4403 .cause_reg = A_CIM_HOST_INT_CAUSE,
4404 .enable_reg = A_CIM_HOST_INT_ENABLE,
4406 .details = cim_host_intr_details,
4407 .actions = cim_host_intr_actions,
4409 static const struct intr_details cim_host_upacc_intr_details[] = {
4410 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
4411 { F_TIMEOUTMAINT, "CIM PIF MA timeout" },
4412 { F_TIMEOUTINT, "CIM PIF timeout" },
4413 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
4414 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
4415 { F_BLKWRPLINT, "CIM block write to PL space" },
4416 { F_BLKRDPLINT, "CIM block read from PL space" },
4418 "CIM single write to PL space with illegal BEs" },
4420 "CIM single read from PL space with illegal BEs" },
4421 { F_BLKWRCTLINT, "CIM block write to CTL space" },
4422 { F_BLKRDCTLINT, "CIM block read from CTL space" },
4424 "CIM single write to CTL space with illegal BEs" },
4426 "CIM single read from CTL space with illegal BEs" },
4427 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
4428 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
4430 "CIM single write to EEPROM space with illegal BEs" },
4432 "CIM single read from EEPROM space with illegal BEs" },
4433 { F_BLKWRFLASHINT, "CIM block write to flash space" },
4434 { F_BLKRDFLASHINT, "CIM block read from flash space" },
4435 { F_SGLWRFLASHINT, "CIM single write to flash space" },
4437 "CIM single read from flash space with illegal BEs" },
4438 { F_BLKWRBOOTINT, "CIM block write to boot space" },
4439 { F_BLKRDBOOTINT, "CIM block read from boot space" },
4440 { F_SGLWRBOOTINT, "CIM single write to boot space" },
4442 "CIM single read from boot space with illegal BEs" },
4443 { F_ILLWRBEINT, "CIM illegal write BEs" },
4444 { F_ILLRDBEINT, "CIM illegal read BEs" },
4445 { F_ILLRDINT, "CIM illegal read" },
4446 { F_ILLWRINT, "CIM illegal write" },
4447 { F_ILLTRANSINT, "CIM illegal transaction" },
4448 { F_RSVDSPACEINT, "CIM reserved space access" },
4451 static const struct intr_info cim_host_upacc_intr_info = {
4452 .name = "CIM_HOST_UPACC_INT_CAUSE",
4453 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
4454 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
4455 .fatal = 0x3fffeeff,
4456 .details = cim_host_upacc_intr_details,
4459 static const struct intr_info cim_pf_host_intr_info = {
4460 .name = "CIM_PF_HOST_INT_CAUSE",
4461 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4462 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
4470 fw_err = t4_read_reg(adap, A_PCIE_FW);
4471 if (fw_err & F_PCIE_FW_ERR)
4472 t4_report_fw_error(adap);
4475 * When the Firmware detects an internal error which normally wouldn't
4476 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4477 * to make sure the Host sees the Firmware Crash. So if we have a
4478 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4481 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
4482 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
4483 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
4484 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
/* Per-generation fatal masks for CIM_HOST_INT_CAUSE. */
4489 cim_host_intr_info.fatal = 0x001fffe2;
4490 else if (is_t5(adap))
4491 cim_host_intr_info.fatal = 0x007dffe2;
4493 cim_host_intr_info.fatal = 0x007dffe6;
4494 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
4495 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
4496 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
4502 * ULP RX interrupt handler.
/*
 * Service ULP_RX_INT_CAUSE and ULP_RX_INT_CAUSE_2.  @arg is unused.
 * Returns true if a fatal condition was seen.
 */
4504 static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
4506 static const struct intr_details ulprx_intr_details[] = {
4508 { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
4509 { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },
4512 { F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
4513 { F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
4514 { 0x007fffff, "ULPRX parity error" },
4517 static const struct intr_info ulprx_intr_info = {
4518 .name = "ULP_RX_INT_CAUSE",
4519 .cause_reg = A_ULP_RX_INT_CAUSE,
4520 .enable_reg = A_ULP_RX_INT_ENABLE,
4521 .fatal = 0x07ffffff,
4522 .details = ulprx_intr_details,
4525 static const struct intr_info ulprx_intr2_info = {
4526 .name = "ULP_RX_INT_CAUSE_2",
4527 .cause_reg = A_ULP_RX_INT_CAUSE_2,
4528 .enable_reg = A_ULP_RX_INT_ENABLE_2,
4535 fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
4536 fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
4542 * ULP TX interrupt handler.
/*
 * Service ULP_TX_INT_CAUSE and ULP_TX_INT_CAUSE_2.  @arg is unused.
 * Returns true if a fatal condition was seen.
 */
4544 static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
4546 static const struct intr_details ulptx_intr_details[] = {
4547 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
4548 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
4549 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
4550 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
4551 { 0x0fffffff, "ULPTX parity error" },
4554 static const struct intr_info ulptx_intr_info = {
4555 .name = "ULP_TX_INT_CAUSE",
4556 .cause_reg = A_ULP_TX_INT_CAUSE,
4557 .enable_reg = A_ULP_TX_INT_ENABLE,
4558 .fatal = 0x0fffffff,
4559 .details = ulptx_intr_details,
4562 static const struct intr_info ulptx_intr2_info = {
4563 .name = "ULP_TX_INT_CAUSE_2",
4564 .cause_reg = A_ULP_TX_INT_CAUSE_2,
4565 .enable_reg = A_ULP_TX_INT_ENABLE_2,
4572 fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
4573 fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
/*
 * Dump the PM_TX debug stat registers via the indirect debug window.
 * Used as an intr_action from pmtx_intr_handler.
 */
4578 static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
4583 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
4584 ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
4585 for (i = 0; i < ARRAY_SIZE(data); i++) {
4586 CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
4587 A_PM_TX_DBG_STAT0 + i, data[i]);
4594 * PM TX interrupt handler.
/*
 * Service PM_TX_INT_CAUSE.  Any cause bit triggers a dump of the PM TX
 * debug stats (see pmtx_intr_actions) and is treated as fatal.
 */
4596 static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
4598 static const struct intr_action pmtx_intr_actions[] = {
4599 { 0xffffffff, 0, pmtx_dump_dbg_stats },
4602 static const struct intr_details pmtx_intr_details[] = {
4603 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
4604 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
4605 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
4606 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
4607 { 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
4608 { 0x00f00000, "PMTX icspi FIFO Rx framing error" },
4609 { 0x000f0000, "PMTX icspi FIFO Tx framing error" },
4610 { 0x0000f000, "PMTX oespi FIFO Rx framing error" },
4611 { 0x00000f00, "PMTX oespi FIFO Tx framing error" },
4612 { 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
4613 { F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
4614 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
4615 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
4616 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
4619 static const struct intr_info pmtx_intr_info = {
4620 .name = "PM_TX_INT_CAUSE",
4621 .cause_reg = A_PM_TX_INT_CAUSE,
4622 .enable_reg = A_PM_TX_INT_ENABLE,
4623 .fatal = 0xffffffff,
4624 .details = pmtx_intr_details,
4625 .actions = pmtx_intr_actions,
4628 return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
4632 * PM RX interrupt handler.
/*
 * Service PM_RX_INT_CAUSE.  @arg is unused.  Returns true if a fatal
 * condition was seen.
 */
4634 static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
4636 static const struct intr_details pmrx_intr_details[] = {
4638 { 0x18000000, "PMRX ospi overflow" },
4639 { F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
4640 { F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
4641 { F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
4642 { F_SDC_ERR, "PMRX SDC error" },
4645 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
4646 { 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
4647 { 0x0003c000, "PMRX iespi Rx framing error" },
4648 { 0x00003c00, "PMRX iespi Tx framing error" },
4649 { 0x00000300, "PMRX ocspi Rx framing error" },
4650 { 0x000000c0, "PMRX ocspi Tx framing error" },
4651 { 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
4652 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
4653 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
4654 { F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
4655 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
4658 static const struct intr_info pmrx_intr_info = {
4659 .name = "PM_RX_INT_CAUSE",
4660 .cause_reg = A_PM_RX_INT_CAUSE,
4661 .enable_reg = A_PM_RX_INT_ENABLE,
4662 .fatal = 0x1fffffff,
4663 .details = pmrx_intr_details,
4667 return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
4671 * CPL switch interrupt handler.
/*
 * Service CPL_INTR_CAUSE.  The fatal mask is selected per chip
 * generation before dispatching to t4_handle_intr().
 */
4673 static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
4675 static const struct intr_details cplsw_intr_details[] = {
4677 { F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
4678 { F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
4681 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
4682 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
4683 { F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
4684 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
4685 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
4686 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
/* Non-const: .fatal filled in below. */
4689 struct intr_info cplsw_intr_info = {
4690 .name = "CPL_INTR_CAUSE",
4691 .cause_reg = A_CPL_INTR_CAUSE,
4692 .enable_reg = A_CPL_INTR_ENABLE,
4694 .details = cplsw_intr_details,
4699 cplsw_intr_info.fatal = 0x2f;
4700 else if (is_t5(adap))
4701 cplsw_intr_info.fatal = 0xef;
4703 cplsw_intr_info.fatal = 0xff;
4705 return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
/*
 * Fatal-bit masks for LE_DB_INT_CAUSE, used by le_intr_handler():
 * T4_LE_FATAL_MASK covers T4/T5; T6_LE_PERRCRC_MASK groups the T6
 * parity/CRC bits and is a subset of T6_LE_FATAL_MASK.
 */
4708 #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
4709 #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
4710 F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
4711 F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
4712 F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
4713 #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
4714 F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
4715 F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
4718 * LE interrupt handler.
/*
 * Service LE_DB_INT_CAUSE.  Decode table and fatal mask are chosen per
 * chip generation (T4/T5 vs T6) before dispatching.
 */
4720 static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
4722 static const struct intr_details le_intr_details[] = {
4723 { F_REQQPARERR, "LE request queue parity error" },
4724 { F_UNKNOWNCMD, "LE unknown command" },
4725 { F_ACTRGNFULL, "LE active region full" },
4726 { F_PARITYERR, "LE parity error" },
4727 { F_LIPMISS, "LE LIP miss" },
4728 { F_LIP0, "LE 0 LIP error" },
4731 static const struct intr_details t6_le_intr_details[] = {
4732 { F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
4733 { F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
4734 { F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
4735 { F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
4736 { F_TOTCNTERR, "LE total active < TCAM count" },
4737 { F_CMDPRSRINTERR, "LE internal error in parser" },
4738 { F_CMDTIDERR, "Incorrect tid in LE command" },
4739 { F_T6_ACTRGNFULL, "LE active region full" },
4740 { F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
4741 { F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
4742 { F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
4743 { F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
4744 { F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
4745 { F_TCAMACCFAIL, "LE TCAM access failure" },
4746 { F_T6_UNKNOWNCMD, "LE unknown command" },
4747 { F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
4748 { F_T6_LIPMISS, "LE CLIP lookup miss" },
4749 { T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
/* Non-const: .details and .fatal filled in below. */
4752 struct intr_info le_intr_info = {
4753 .name = "LE_DB_INT_CAUSE",
4754 .cause_reg = A_LE_DB_INT_CAUSE,
4755 .enable_reg = A_LE_DB_INT_ENABLE,
4761 if (chip_id(adap) <= CHELSIO_T5) {
4762 le_intr_info.details = le_intr_details;
4763 le_intr_info.fatal = T4_LE_FATAL_MASK;
4765 le_intr_info.fatal |= F_VFPARERR;
4767 le_intr_info.details = t6_le_intr_details;
4768 le_intr_info.fatal = T6_LE_FATAL_MASK;
4771 return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
4775 * MPS interrupt handler.
/*
 * Service all MPS interrupt cause registers (Rx parity, Tx, TRC,
 * statistics SRAM/FIFOs, classifier, and SRAM1 on T5+), then clear and
 * flush MPS_INT_CAUSE.  Returns true if a fatal condition was seen.
 */
4777 static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
4779 static const struct intr_details mps_rx_perr_intr_details[] = {
4780 { 0xffffffff, "MPS Rx parity error" },
4783 static const struct intr_info mps_rx_perr_intr_info = {
4784 .name = "MPS_RX_PERR_INT_CAUSE",
4785 .cause_reg = A_MPS_RX_PERR_INT_CAUSE,
4786 .enable_reg = A_MPS_RX_PERR_INT_ENABLE,
4787 .fatal = 0xffffffff,
4788 .details = mps_rx_perr_intr_details,
4791 static const struct intr_details mps_tx_intr_details[] = {
4792 { F_PORTERR, "MPS Tx destination port is disabled" },
4793 { F_FRMERR, "MPS Tx framing error" },
4794 { F_SECNTERR, "MPS Tx SOP/EOP error" },
4795 { F_BUBBLE, "MPS Tx underflow" },
4796 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
4797 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
4798 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
4799 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
/* Non-const: F_BUBBLE is removed from .fatal on T6 below. */
4802 struct intr_info mps_tx_intr_info = {
4803 .name = "MPS_TX_INT_CAUSE",
4804 .cause_reg = A_MPS_TX_INT_CAUSE,
4805 .enable_reg = A_MPS_TX_INT_ENABLE,
4807 .details = mps_tx_intr_details,
4810 static const struct intr_details mps_trc_intr_details[] = {
4811 { F_MISCPERR, "MPS TRC misc parity error" },
4812 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
4813 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
4816 static const struct intr_info mps_trc_intr_info = {
4817 .name = "MPS_TRC_INT_CAUSE",
4818 .cause_reg = A_MPS_TRC_INT_CAUSE,
4819 .enable_reg = A_MPS_TRC_INT_ENABLE,
4820 .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
4821 .details = mps_trc_intr_details,
4824 static const struct intr_details mps_stat_sram_intr_details[] = {
4825 { 0xffffffff, "MPS statistics SRAM parity error" },
4828 static const struct intr_info mps_stat_sram_intr_info = {
4829 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
4830 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4831 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
4832 .fatal = 0x1fffffff,
4833 .details = mps_stat_sram_intr_details,
4836 static const struct intr_details mps_stat_tx_intr_details[] = {
4837 { 0xffffff, "MPS statistics Tx FIFO parity error" },
4840 static const struct intr_info mps_stat_tx_intr_info = {
4841 .name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
4842 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4843 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
4845 .details = mps_stat_tx_intr_details,
4848 static const struct intr_details mps_stat_rx_intr_details[] = {
4849 { 0xffffff, "MPS statistics Rx FIFO parity error" },
4852 static const struct intr_info mps_stat_rx_intr_info = {
4853 .name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
4854 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4855 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
4857 .details = mps_stat_rx_intr_details,
4860 static const struct intr_details mps_cls_intr_details[] = {
4861 { F_HASHSRAM, "MPS hash SRAM parity error" },
4862 { F_MATCHTCAM, "MPS match TCAM parity error" },
4863 { F_MATCHSRAM, "MPS match SRAM parity error" },
4866 static const struct intr_info mps_cls_intr_info = {
4867 .name = "MPS_CLS_INT_CAUSE",
4868 .cause_reg = A_MPS_CLS_INT_CAUSE,
4869 .enable_reg = A_MPS_CLS_INT_ENABLE,
4870 .fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
4871 .details = mps_cls_intr_details,
4874 static const struct intr_details mps_stat_sram1_intr_details[] = {
4875 { 0xff, "MPS statistics SRAM1 parity error" },
4878 static const struct intr_info mps_stat_sram1_intr_info = {
4879 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
4880 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
4881 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
4883 .details = mps_stat_sram1_intr_details,
/* F_BUBBLE is not treated as fatal on T6. */
4889 if (chip_id(adap) == CHELSIO_T6)
4890 mps_tx_intr_info.fatal &= ~F_BUBBLE;
4893 fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
4894 fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
4895 fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
4896 fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
4897 fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
4898 fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
4899 fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
/* SRAM1 cause register exists on T5 and later only. */
4900 if (chip_id(adap) > CHELSIO_T4) {
4901 fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
/* Clear the top-level summary (write of 0 suffices on T4) and flush. */
4905 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
4906 t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */
4913 * EDC/MC interrupt handler.
/*
 * Service the interrupt cause register of one memory controller.
 * @idx selects the controller (MEM_EDC0/MEM_EDC1/MC0/MC1); the cause,
 * enable, and ECC-count register addresses are chosen accordingly.
 * Also reports and clears the ECC error counters.  Returns true if a
 * fatal condition was seen.
 */
4915 static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
4917 static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
4918 unsigned int count_reg, v;
4919 static const struct intr_details mem_intr_details[] = {
4920 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
4921 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
4922 { F_PERR_INT_CAUSE, "FIFO parity error" },
/* Registers are filled in below once @idx is known. */
4925 struct intr_info ii = {
4926 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
4927 .details = mem_intr_details,
4934 ii.name = "EDC0_INT_CAUSE";
4935 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
4936 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
4937 count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
4940 ii.name = "EDC1_INT_CAUSE";
4941 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
4942 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
4943 count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
4946 ii.name = "MC0_INT_CAUSE";
/* T4 uses the legacy MC register block; later chips use MC_P. */
4948 ii.cause_reg = A_MC_INT_CAUSE;
4949 ii.enable_reg = A_MC_INT_ENABLE;
4950 count_reg = A_MC_ECC_STATUS;
4952 ii.cause_reg = A_MC_P_INT_CAUSE;
4953 ii.enable_reg = A_MC_P_INT_ENABLE;
4954 count_reg = A_MC_P_ECC_STATUS;
4958 ii.name = "MC1_INT_CAUSE";
4959 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
4960 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
4961 count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
4965 fatal = t4_handle_intr(adap, &ii, 0, verbose);
/* Report outstanding ECC error counts, then clear the counters. */
4967 v = t4_read_reg(adap, count_reg);
4969 if (G_ECC_UECNT(v) != 0) {
4971 "%s: %u uncorrectable ECC data error(s)\n",
4972 name[idx], G_ECC_UECNT(v));
4974 if (G_ECC_CECNT(v) != 0) {
4975 if (idx <= MEM_EDC1)
4976 t4_edc_err_read(adap, idx);
4977 CH_WARN_RATELIMIT(adap,
4978 "%s: %u correctable ECC data error(s)\n",
4979 name[idx], G_ECC_CECNT(v));
4981 t4_write_reg(adap, count_reg, 0xffffffff);
/*
 * Report and clear an MA address wrap-around error.  Invoked as an
 * intr_action when F_MEM_WRAP_INT_CAUSE is set (see ma_intr_handler).
 */
4987 static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
4991 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
4993 "MA address wrap-around error by client %u to address %#x\n",
4994 G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
/* Write back the status value to clear it. */
4995 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
5002 * MA interrupt handler.
/*
 * Service MA_INT_CAUSE and the MA parity error status registers
 * (STATUS2 exists on T5 and later only).  A wrap-around cause runs
 * ma_wrap_status() to report the offending client/address.
 */
5004 static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
5006 static const struct intr_action ma_intr_actions[] = {
5007 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
5010 static const struct intr_info ma_intr_info = {
5011 .name = "MA_INT_CAUSE",
5012 .cause_reg = A_MA_INT_CAUSE,
5013 .enable_reg = A_MA_INT_ENABLE,
5014 .fatal = F_MEM_WRAP_INT_CAUSE | F_MEM_PERR_INT_CAUSE |
5017 .actions = ma_intr_actions,
5019 static const struct intr_info ma_perr_status1 = {
5020 .name = "MA_PARITY_ERROR_STATUS1",
5021 .cause_reg = A_MA_PARITY_ERROR_STATUS1,
5022 .enable_reg = A_MA_PARITY_ERROR_ENABLE1,
5023 .fatal = 0xffffffff,
5027 static const struct intr_info ma_perr_status2 = {
5028 .name = "MA_PARITY_ERROR_STATUS2",
5029 .cause_reg = A_MA_PARITY_ERROR_STATUS2,
5030 .enable_reg = A_MA_PARITY_ERROR_ENABLE2,
5031 .fatal = 0xffffffff,
5038 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
5039 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
5040 if (chip_id(adap) > CHELSIO_T4)
5041 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
5047 * SMB interrupt handler.
/*
 * Service SMB_INT_CAUSE.  All listed parity errors are fatal.
 */
5049 static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
5051 static const struct intr_details smb_intr_details[] = {
5052 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
5053 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
5054 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
5057 static const struct intr_info smb_intr_info = {
5058 .name = "SMB_INT_CAUSE",
5059 .cause_reg = A_SMB_INT_CAUSE,
5060 .enable_reg = A_SMB_INT_ENABLE,
5061 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
5062 .details = smb_intr_details,
5066 return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
/*
 * ncsi_intr_handler - NC-SI interrupt handler.
 * All four parity-error sources (CIM, MPS, Tx FIFO, Rx FIFO) are fatal.
 * NOTE(review): the excerpt elides some lines of the body.
 */
5070 * NC-SI interrupt handler.
5072 static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
5074 static const struct intr_details ncsi_intr_details[] = {
5075 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
5076 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
5077 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
5078 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
5081 static const struct intr_info ncsi_intr_info = {
5082 .name = "NCSI_INT_CAUSE",
5083 .cause_reg = A_NCSI_INT_CAUSE,
5084 .enable_reg = A_NCSI_INT_ENABLE,
5085 .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
5086 F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
5087 .details = ncsi_intr_details,
5091 return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
/*
 * mac_intr_handler - per-port MAC interrupt handler.
 * Builds an intr_info on the stack because the register addresses depend
 * on @port and on the chip generation: T4 uses XGMAC_PORT registers
 * (PORT_REG), T5+ uses MAC_PORT registers (T5_PORT_REG).  T5+ also has a
 * PERR_INT_CAUSE register and T6+ an additional PERR_INT_CAUSE_100G.
 * NOTE(review): the excerpt elides some lines; in the visible fragment
 * the ii.fatal/ii.details assignments end in ',' (comma operator) rather
 * than ';' — verify against the upstream file.
 */
5095 * MAC interrupt handler.
5097 static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
5099 static const struct intr_details mac_intr_details[] = {
5100 { F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
5101 { F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
5105 struct intr_info ii;
/* T4 flavor: XGMAC_PORT register block. */
5109 snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
5111 ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5112 ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
5113 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR,
5114 ii.details = mac_intr_details,
/* T5+ flavor: MAC_PORT register block. */
5117 snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
5119 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5120 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
5121 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR,
5122 ii.details = mac_intr_details,
5125 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5127 if (chip_id(adap) >= CHELSIO_T5) {
5128 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
5130 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
5131 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
5135 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
/* 100G parity cause register first appears on T6. */
5138 if (chip_id(adap) >= CHELSIO_T6) {
5139 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
5141 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
5142 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
5146 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
/*
 * plpl_intr_handler - PL (top-level) PL_INT_CAUSE handler.
 * F_FATALPERR is always fatal; F_PERRVFID is added to the fatal mask
 * conditionally (the guarding condition is elided in this excerpt),
 * which is why plpl_intr_info is a mutable local rather than static
 * const like the other handlers' tables.
 */
5152 static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
5154 static const struct intr_details plpl_intr_details[] = {
5155 { F_FATALPERR, "Fatal parity error" },
5156 { F_PERRVFID, "VFID_MAP parity error" },
5159 struct intr_info plpl_intr_info = {
5160 .name = "PL_PL_INT_CAUSE",
5161 .cause_reg = A_PL_PL_INT_CAUSE,
5162 .enable_reg = A_PL_PL_INT_ENABLE,
5163 .fatal = F_FATALPERR,
5164 .details = plpl_intr_details,
/* Conditionally escalate VFID_MAP parity errors to fatal. */
5169 plpl_intr_info.fatal |= F_PERRVFID;
5171 return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
/*
 * Top-level dispatch: reads PL_PERR_CAUSE first, shows/clears it, then
 * runs t4_handle_intr on PL_INT_CAUSE with per-bit action handlers
 * (the module handlers defined above).  If anything fatal was seen the
 * adapter is stopped via t4_fatal_err.
 * NOTE(review): many lines of the tables and body are elided in this
 * excerpt; comments cover only the visible statements.
 */
5175 * t4_slow_intr_handler - control path interrupt handler
5176 * @adap: the adapter
5177 * @verbose: increased verbosity, for debug
5179 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5180 * The designation 'slow' is because it involves register reads, while
5181 * data interrupts typically don't involve any MMIOs.
5183 int t4_slow_intr_handler(struct adapter *adap, bool verbose)
/* Human-readable names for the PL_INT_CAUSE bits (partial list shown). */
5185 static const struct intr_details pl_intr_details[] = {
5188 { F_ULP_TX, "ULP TX" },
5191 { F_CPL_SWITCH, "CPL Switch" },
5192 { F_ULP_RX, "ULP RX" },
5193 { F_PM_RX, "PM RX" },
5194 { F_PM_TX, "PM TX" },
5210 { F_NCSI, "NC-SI" },
5218 static const struct intr_info pl_perr_cause = {
5219 .name = "PL_PERR_CAUSE",
5220 .cause_reg = A_PL_PERR_CAUSE,
5221 .enable_reg = A_PL_PERR_ENABLE,
5222 .fatal = 0xffffffff,
5223 .details = pl_intr_details,
/* Map each PL_INT_CAUSE bit to its module handler; the second field is
 * the handler's 'arg' (memory type for mem_intr_handler, port number
 * for mac_intr_handler, -1 where unused). */
5226 static const struct intr_action pl_intr_action[] = {
5227 { F_MC1, MEM_MC1, mem_intr_handler },
5228 { F_ULP_TX, -1, ulptx_intr_handler },
5229 { F_SGE, -1, sge_intr_handler },
5230 { F_CPL_SWITCH, -1, cplsw_intr_handler },
5231 { F_ULP_RX, -1, ulprx_intr_handler },
5232 { F_PM_RX, -1, pmrx_intr_handler},
5233 { F_PM_TX, -1, pmtx_intr_handler},
5234 { F_MA, -1, ma_intr_handler },
5235 { F_TP, -1, tp_intr_handler },
5236 { F_LE, -1, le_intr_handler },
5237 { F_EDC1, MEM_EDC1, mem_intr_handler },
5238 { F_EDC0, MEM_EDC0, mem_intr_handler },
5239 { F_MC0, MEM_MC0, mem_intr_handler },
5240 { F_PCIE, -1, pcie_intr_handler },
5241 { F_MAC3, 3, mac_intr_handler},
5242 { F_MAC2, 2, mac_intr_handler},
5243 { F_MAC1, 1, mac_intr_handler},
5244 { F_MAC0, 0, mac_intr_handler},
5245 { F_SMB, -1, smb_intr_handler},
5246 { F_PL, -1, plpl_intr_handler },
5247 { F_NCSI, -1, ncsi_intr_handler},
5248 { F_MPS, -1, mps_intr_handler },
5249 { F_CIM, -1, cim_intr_handler },
5252 static const struct intr_info pl_intr_info = {
5253 .name = "PL_INT_CAUSE",
5254 .cause_reg = A_PL_INT_CAUSE,
5255 .enable_reg = A_PL_INT_ENABLE,
5257 .details = pl_intr_details,
5258 .actions = pl_intr_action,
/* Snapshot and clear the parity-error cause before the main dispatch. */
5263 perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
5264 if (verbose || perr != 0) {
5265 t4_show_intr_info(adap, &pl_perr_cause, perr);
5267 t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
5269 perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
5271 fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
5273 t4_fatal_err(adap, false);
/* PF-specific causes enabled in MYPF_REG(A_PL_PF_INT_ENABLE) below. */
5278 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
/*
 * Chip-dependent SGE error enables: the doorbell-FIFO bits exist on
 * T4/T5 only, the PCIe-error and WRE-length bits on T6+.
 * NOTE(review): some lines of the body are elided in this excerpt.
 */
5281 * t4_intr_enable - enable interrupts
5282 * @adapter: the adapter whose interrupts should be enabled
5284 * Enable PF-specific interrupts for the calling function and the top-level
5285 * interrupt concentrator for global interrupts. Interrupts are already
5286 * enabled at each module, here we just enable the roots of the interrupt
5289 * Note: this function should be called only when the driver manages
5290 * non PF-specific interrupts from the various HW modules. Only one PCI
5291 * function at a time should be doing this.
5293 void t4_intr_enable(struct adapter *adap)
5297 if (chip_id(adap) <= CHELSIO_T5)
5298 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5300 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5301 val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
5302 F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
5303 F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
5304 F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5305 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
5307 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
/* Enable this PF's own causes, then route global causes to this PF
 * via its bit in PL_INT_MAP0. */
5308 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5309 t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
5310 t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
/*
 * Mirror of t4_intr_enable: clear this PF's cause enables and remove its
 * bit from the global PL_INT_MAP0 routing.
 */
5314 * t4_intr_disable - disable interrupts
5315 * @adap: the adapter whose interrupts should be disabled
5317 * Disable interrupts. We only disable the top-level interrupt
5318 * concentrators. The caller must be a PCI function managing global
5321 void t4_intr_disable(struct adapter *adap)
5324 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5325 t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
/*
 * Clears every known interrupt-cause register by writing all-ones
 * (cause registers are write-1-to-clear), then handles the chip- and
 * channel-specific registers that can't live in the common table.
 * NOTE(review): several table entries and body lines are elided here.
 */
5329 * t4_intr_clear - clear all interrupts
5330 * @adap: the adapter whose interrupts should be cleared
5332 * Clears all interrupts. The caller must be a PCI function managing
5333 * global interrupts.
5335 void t4_intr_clear(struct adapter *adap)
/* Cause registers common to all chip revisions (partial list shown). */
5337 static const u32 cause_reg[] = {
5338 A_CIM_HOST_INT_CAUSE,
5339 A_CIM_HOST_UPACC_INT_CAUSE,
5340 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
5342 EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
5344 A_MA_INT_WRAP_STATUS,
5345 A_MA_PARITY_ERROR_STATUS1,
5347 A_MPS_CLS_INT_CAUSE,
5348 A_MPS_RX_PERR_INT_CAUSE,
5349 A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5350 A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5351 A_MPS_TRC_INT_CAUSE,
5353 A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5367 A_ULP_RX_INT_CAUSE_2,
5369 A_ULP_TX_INT_CAUSE_2,
5371 MYPF_REG(A_PL_PF_INT_CAUSE),
5374 const int nchan = adap->chip_params->nchan;
5376 for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
5377 t4_write_reg(adap, cause_reg[i], 0xffffffff);
/* T4-only registers (the elided guard presumably checks is_t4). */
5380 t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
5382 t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
5384 t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
5385 for (i = 0; i < nchan; i++) {
5386 t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
/* T5+ extras: second MA parity status, SRAM1, SGE cause 5, per-channel
 * MAC_PORT causes (plus the 100G parity cause on T6+). */
5390 if (chip_id(adap) >= CHELSIO_T5) {
5391 t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
5392 t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
5393 t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
5394 t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
5396 t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
5399 for (i = 0; i < nchan; i++) {
5400 t4_write_reg(adap, T5_PORT_REG(i,
5401 A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
5402 if (chip_id(adap) > CHELSIO_T5) {
5403 t4_write_reg(adap, T5_PORT_REG(i,
5404 A_MAC_PORT_PERR_INT_CAUSE_100G),
5407 t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
5411 if (chip_id(adap) >= CHELSIO_T6) {
5412 t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
5415 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
/* Clear top-level last, then read back to flush posted writes. */
5416 t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
5417 t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
5418 (void) t4_read_reg(adap, A_PL_INT_CAUSE); /* flush */
/*
 * Packs the 6 address bytes into two 24-bit halves; the folding of
 * a and b into the final hash value is elided from this excerpt.
 */
5422 * hash_mac_addr - return the hash value of a MAC address
5423 * @addr: the 48-bit Ethernet MAC address
5425 * Hashes a MAC address according to the hash function used by HW inexact
5426 * (hash) address matching.
5428 static int hash_mac_addr(const u8 *addr)
5430 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
5431 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
/*
 * Implementation outline (some lines elided in this excerpt): the table
 * range is written in chunks of up to 32 queue IDs per FW_RSS_IND_TBL_CMD,
 * with three 10-bit IDs packed per 32-bit word.  @rspq is consumed
 * circularly (rsp wraps at rsp_end) so fewer values than entries repeat.
 */
5439 * t4_config_rss_range - configure a portion of the RSS mapping table
5440 * @adapter: the adapter
5441 * @mbox: mbox to use for the FW command
5442 * @viid: virtual interface whose RSS subtable is to be written
5443 * @start: start entry in the table to write
5444 * @n: how many table entries to write
5445 * @rspq: values for the "response queue" (Ingress Queue) lookup table
5446 * @nrspq: number of values in @rspq
5448 * Programs the selected part of the VI's RSS mapping table with the
5449 * provided values. If @nrspq < @n the supplied values are used repeatedly
5450 * until the full table range is populated.
5452 * The caller must ensure the values in @rspq are in the range allowed for
5455 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5456 int start, int n, const u16 *rspq, unsigned int nrspq)
5459 const u16 *rsp = rspq;
5460 const u16 *rsp_end = rspq + nrspq;
5461 struct fw_rss_ind_tbl_cmd cmd;
5463 memset(&cmd, 0, sizeof(cmd));
5464 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5465 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5466 V_FW_RSS_IND_TBL_CMD_VIID(viid));
5467 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5470 * Each firmware RSS command can accommodate up to 32 RSS Ingress
5471 * Queue Identifiers. These Ingress Queue IDs are packed three to
5472 * a 32-bit word as 10-bit values with the upper remaining 2 bits
/* Chunk size: at most 32 IDs per firmware command. */
5476 int nq = min(n, 32);
5478 __be32 *qp = &cmd.iq0_to_iq2;
5481 * Set up the firmware RSS command header to send the next
5482 * "nq" Ingress Queue IDs to the firmware.
5484 cmd.niqid = cpu_to_be16(nq);
5485 cmd.startidx = cpu_to_be16(start);
5488 * "nq" more done for the start of the next loop.
5494 * While there are still Ingress Queue IDs to stuff into the
5495 * current firmware RSS command, retrieve them from the
5496 * Ingress Queue ID array and insert them into the command.
5500 * Grab up to the next 3 Ingress Queue IDs (wrapping
5501 * around the Ingress Queue ID array if necessary) and
5502 * insert them into the firmware RSS command at the
5503 * current 3-tuple position within the commad.
5507 int nqbuf = min(3, nq);
5510 qbuf[0] = qbuf[1] = qbuf[2] = 0;
5511 while (nqbuf && nq_packed < 32) {
5518 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5519 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5520 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5524 * Send this portion of the RRS table update to the firmware;
5525 * bail out on any errors.
5527 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
/*
 * Only MANUAL and BASICVIRTUAL modes populate mode-specific command
 * fields; the (elided) else branch presumably rejects other modes —
 * verify against the upstream file.
 */
5535 * t4_config_glbl_rss - configure the global RSS mode
5536 * @adapter: the adapter
5537 * @mbox: mbox to use for the FW command
5538 * @mode: global RSS mode
5539 * @flags: mode-specific flags
5541 * Sets the global RSS mode.
5543 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5546 struct fw_rss_glb_config_cmd c;
5548 memset(&c, 0, sizeof(c));
5549 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5550 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5551 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5552 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5553 c.u.manual.mode_pkd =
5554 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5555 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5556 c.u.basicvirtual.mode_keymode =
5557 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5558 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5561 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
/*
 * Builds a FW_RSS_VI_CONFIG_CMD using the basicvirtual union: default
 * queue + flags, secret-key table index, and per-VF scramble key XOR.
 */
5565 * t4_config_vi_rss - configure per VI RSS settings
5566 * @adapter: the adapter
5567 * @mbox: mbox to use for the FW command
5570 * @defq: id of the default RSS queue for the VI.
5571 * @skeyidx: RSS secret key table index for non-global mode
5572 * @skey: RSS vf_scramble key for VI.
5574 * Configures VI-specific RSS properties.
5576 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5577 unsigned int flags, unsigned int defq, unsigned int skeyidx,
5580 struct fw_rss_vi_config_cmd c;
5582 memset(&c, 0, sizeof(c));
5583 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5584 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5585 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5586 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5587 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5588 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5589 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5590 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5591 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5593 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
/*
 * rd_rss_row: kicks off a lookup-table read by writing the row index
 * (with the 0xfff00000 command bits set) and then polls for
 * F_LKPTBLROWVLD before returning the row contents via *val.
 */
5596 /* Read an RSS table row */
5597 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5599 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
5600 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
/*
 * t4_read_rss: each row holds two queue entries, hence RSS_NENTRIES / 2
 * iterations producing two map entries per row.
 */
5605 * t4_read_rss - read the contents of the RSS mapping table
5606 * @adapter: the adapter
5607 * @map: holds the contents of the RSS mapping table
5609 * Reads the contents of the RSS hash->queue mapping table.
5611 int t4_read_rss(struct adapter *adapter, u16 *map)
5616 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5617 ret = rd_rss_row(adapter, i, &val);
5620 *map++ = G_LKPTBLQUEUE0(val);
5621 *map++ = G_LKPTBLQUEUE1(val);
/*
 * One FW_LDST_CMD mailbox round-trip per register: for reads the value
 * field is left 0 and the reply's value is copied back into vals[i];
 * for writes vals[i] is sent.  Loop aborts on the first mailbox error
 * (the error-check lines are elided in this excerpt).
 */
5627 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5628 * @adap: the adapter
5629 * @cmd: TP fw ldst address space type
5630 * @vals: where the indirect register values are stored/written
5631 * @nregs: how many indirect registers to read/write
5632 * @start_idx: index of first indirect register to read/write
5633 * @rw: Read (1) or Write (0)
5634 * @sleep_ok: if true we may sleep while awaiting command completion
5636 * Access TP indirect registers through LDST
5638 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5639 unsigned int nregs, unsigned int start_index,
5640 unsigned int rw, bool sleep_ok)
5644 struct fw_ldst_cmd c;
5646 for (i = 0; i < nregs; i++) {
5647 memset(&c, 0, sizeof(c));
5648 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5650 (rw ? F_FW_CMD_READ :
5652 V_FW_LDST_CMD_ADDRSPACE(cmd));
5653 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5655 c.u.addrval.addr = cpu_to_be32(start_index + i);
5656 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5657 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* On a successful read, the firmware reply carries the value. */
5663 vals[i] = be32_to_cpu(c.u.addrval.val);
/*
 * Maps the address register onto the matching FW_LDST address space
 * (PIO / TM_PIO / MIB); unknown registers skip straight to backdoor
 * access.  If LDST is usable the firmware path is tried first and the
 * backdoor t4_read_indirect/t4_write_indirect is the fallback.
 * NOTE(review): the switch header and some guard lines are elided.
 */
5669 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5670 * @adap: the adapter
5671 * @reg_addr: Address Register
5672 * @reg_data: Data register
5673 * @buff: where the indirect register values are stored/written
5674 * @nregs: how many indirect registers to read/write
5675 * @start_index: index of first indirect register to read/write
5676 * @rw: READ(1) or WRITE(0)
5677 * @sleep_ok: if true we may sleep while awaiting command completion
5679 * Read/Write TP indirect registers through LDST if possible.
5680 * Else, use backdoor access
5682 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5683 u32 *buff, u32 nregs, u32 start_index, int rw,
5691 cmd = FW_LDST_ADDRSPC_TP_PIO;
5693 case A_TP_TM_PIO_ADDR:
5694 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5696 case A_TP_MIB_INDEX:
5697 cmd = FW_LDST_ADDRSPC_TP_MIB;
5700 goto indirect_access;
5703 if (t4_use_ldst(adap))
5704 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
/* Backdoor path: drive the address/data register pair directly. */
5711 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5714 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
/*
 * Thin convenience wrappers over t4_tp_indirect_rw for the three TP
 * indirect register files (PIO, TM PIO, MIB).  rw=1 selects read,
 * rw=0 write; t4_tp_pio_write uses __DECONST because the shared
 * helper takes a non-const buffer even though writes never modify it.
 */
5720 * t4_tp_pio_read - Read TP PIO registers
5721 * @adap: the adapter
5722 * @buff: where the indirect register values are written
5723 * @nregs: how many indirect registers to read
5724 * @start_index: index of first indirect register to read
5725 * @sleep_ok: if true we may sleep while awaiting command completion
5727 * Read TP PIO Registers
5729 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5730 u32 start_index, bool sleep_ok)
5732 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5733 start_index, 1, sleep_ok);
5737 * t4_tp_pio_write - Write TP PIO registers
5738 * @adap: the adapter
5739 * @buff: where the indirect register values are stored
5740 * @nregs: how many indirect registers to write
5741 * @start_index: index of first indirect register to write
5742 * @sleep_ok: if true we may sleep while awaiting command completion
5744 * Write TP PIO Registers
5746 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5747 u32 start_index, bool sleep_ok)
5749 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5750 __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
5754 * t4_tp_tm_pio_read - Read TP TM PIO registers
5755 * @adap: the adapter
5756 * @buff: where the indirect register values are written
5757 * @nregs: how many indirect registers to read
5758 * @start_index: index of first indirect register to read
5759 * @sleep_ok: if true we may sleep while awaiting command completion
5761 * Read TP TM PIO Registers
5763 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5764 u32 start_index, bool sleep_ok)
5766 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5767 nregs, start_index, 1, sleep_ok);
5771 * t4_tp_mib_read - Read TP MIB registers
5772 * @adap: the adapter
5773 * @buff: where the indirect register values are written
5774 * @nregs: how many indirect registers to read
5775 * @start_index: index of first indirect register to read
5776 * @sleep_ok: if true we may sleep while awaiting command completion
5778 * Read TP MIB Registers
5780 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5783 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5784 start_index, 1, sleep_ok);
5788 * t4_read_rss_key - read the global RSS key
5789 * @adap: the adapter
5790 * @key: 10-entry array holding the 320-bit RSS key
5791 * @sleep_ok: if true we may sleep while awaiting command completion
5793 * Reads the global 320-bit RSS key.
5795 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5797 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
/*
 * Write path: the key is always written to the SECRET_KEY0 PIO block;
 * a valid @idx additionally latches it into the key table via
 * TP_RSS_CONFIG_VRT.  On T6+ with KeyMode 3 and F_KEYEXTEND set, 32
 * table slots are addressable (idx[5:4] goes into KEYWRADDRX).
 */
5801 * t4_write_rss_key - program one of the RSS keys
5802 * @adap: the adapter
5803 * @key: 10-entry array holding the 320-bit RSS key
5804 * @idx: which RSS key to write
5805 * @sleep_ok: if true we may sleep while awaiting command completion
5807 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5808 * 0..15 the corresponding entry in the RSS key table is written,
5809 * otherwise the global RSS key is written.
5811 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5814 u8 rss_key_addr_cnt = 16;
5815 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5818 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5819 * allows access to key addresses 16-63 by using KeyWrAddrX
5820 * as index[5:4](upper 2) into key table
5822 if ((chip_id(adap) > CHELSIO_T5) &&
5823 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5824 rss_key_addr_cnt = 32;
5826 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5828 if (idx >= 0 && idx < rss_key_addr_cnt) {
5829 if (rss_key_addr_cnt > 16)
5830 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5831 vrt | V_KEYWRADDRX(idx >> 4) |
5832 V_T6_VFWRADDR(idx) | F_KEYWREN);
5834 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5835 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
/*
 * PF RSS Configuration Table accessors: one TP PIO word per PF at
 * A_TP_RSS_PF0_CONFIG + index.
 */
5840 * t4_read_rss_pf_config - read PF RSS Configuration Table
5841 * @adapter: the adapter
5842 * @index: the entry in the PF RSS table to read
5843 * @valp: where to store the returned value
5844 * @sleep_ok: if true we may sleep while awaiting command completion
5846 * Reads the PF RSS Configuration Table at the specified index and returns
5847 * the value found there.
5849 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5850 u32 *valp, bool sleep_ok)
5852 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5856 * t4_write_rss_pf_config - write PF RSS Configuration Table
5857 * @adapter: the adapter
5858 * @index: the entry in the VF RSS table to read
5859 * @val: the value to store
5860 * @sleep_ok: if true we may sleep while awaiting command completion
5862 * Writes the PF RSS Configuration Table at the specified index with the
5865 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5866 u32 val, bool sleep_ok)
5868 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
/*
 * VF RSS Configuration accessors.  Both directions program
 * TP_RSS_CONFIG_VRT with the VF index (T4/T5 use VFWRADDR, T6+ use the
 * wider T6_VFWRADDR field) and then move the VFL/VFH pair through the
 * TP PIO registers.  NOTE(review): the visible trigger sets F_VFRDEN in
 * both the read and write paths; the write-enable (F_VFWREN) line
 * appears to be elided from this excerpt — verify against upstream.
 */
5873 * t4_read_rss_vf_config - read VF RSS Configuration Table
5874 * @adapter: the adapter
5875 * @index: the entry in the VF RSS table to read
5876 * @vfl: where to store the returned VFL
5877 * @vfh: where to store the returned VFH
5878 * @sleep_ok: if true we may sleep while awaiting command completion
5880 * Reads the VF RSS Configuration Table at the specified index and returns
5881 * the (VFL, VFH) values found there.
5883 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5884 u32 *vfl, u32 *vfh, bool sleep_ok)
5886 u32 vrt, mask, data;
5888 if (chip_id(adapter) <= CHELSIO_T5) {
5889 mask = V_VFWRADDR(M_VFWRADDR);
5890 data = V_VFWRADDR(index);
5892 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5893 data = V_T6_VFWRADDR(index);
5896 * Request that the index'th VF Table values be read into VFL/VFH.
5898 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5899 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5900 vrt |= data | F_VFRDEN;
5901 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5904 * Grab the VFL/VFH values ...
5906 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5907 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5911 * t4_write_rss_vf_config - write VF RSS Configuration Table
5913 * @adapter: the adapter
5914 * @index: the entry in the VF RSS table to write
5915 * @vfl: the VFL to store
5916 * @vfh: the VFH to store
5918 * Writes the VF RSS Configuration Table at the specified index with the
5919 * specified (VFL, VFH) values.
5921 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5922 u32 vfl, u32 vfh, bool sleep_ok)
5924 u32 vrt, mask, data;
5926 if (chip_id(adapter) <= CHELSIO_T5) {
5927 mask = V_VFWRADDR(M_VFWRADDR);
5928 data = V_VFWRADDR(index);
5930 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5931 data = V_T6_VFWRADDR(index);
5935 * Load up VFL/VFH with the values to be written ...
5937 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5938 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5941 * Write the VFL/VFH into the VF Table at index'th location.
5943 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5944 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5945 vrt |= data | F_VFRDEN;
5946 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
/*
 * Single-word accessors for the PF RSS Map and Mask TP PIO registers.
 */
5950 * t4_read_rss_pf_map - read PF RSS Map
5951 * @adapter: the adapter
5952 * @sleep_ok: if true we may sleep while awaiting command completion
5954 * Reads the PF RSS Map register and returns its value.
5956 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5960 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5966 * t4_write_rss_pf_map - write PF RSS Map
5967 * @adapter: the adapter
5968 * @pfmap: PF RSS Map value
5970 * Writes the specified value to the PF RSS Map register.
5972 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5974 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5978 * t4_read_rss_pf_mask - read PF RSS Mask
5979 * @adapter: the adapter
5980 * @sleep_ok: if true we may sleep while awaiting command completion
5982 * Reads the PF RSS Mask register and returns its value.
5984 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5988 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5994 * t4_write_rss_pf_mask - write PF RSS Mask
5995 * @adapter: the adapter
5996 * @pfmask: PF RSS Mask value
5998 * Writes the specified value to the PF RSS Mask register.
6000 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
6002 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
/*
 * Reads a contiguous window of TP MIB counters (OUT_RST..RXT_SEG_LO)
 * into val[] and decodes it via the STAT/STAT64 macros; the v6 window
 * starts at the V6OUT_RST counter instead.  64-bit counters are split
 * across _HI/_LO MIB words.
 */
6006 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
6007 * @adap: the adapter
6008 * @v4: holds the TCP/IP counter values
6009 * @v6: holds the TCP/IPv6 counter values
6010 * @sleep_ok: if true we may sleep while awaiting command completion
6012 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6013 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6015 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
6016 struct tp_tcp_stats *v6, bool sleep_ok)
6018 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
6020 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
6021 #define STAT(x) val[STAT_IDX(x)]
6022 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
6025 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6026 A_TP_MIB_TCP_OUT_RST, sleep_ok);
6027 v4->tcp_out_rsts = STAT(OUT_RST);
6028 v4->tcp_in_segs = STAT64(IN_SEG);
6029 v4->tcp_out_segs = STAT64(OUT_SEG);
6030 v4->tcp_retrans_segs = STAT64(RXT_SEG);
6033 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6034 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
6035 v6->tcp_out_rsts = STAT(OUT_RST);
6036 v6->tcp_in_segs = STAT64(IN_SEG);
6037 v6->tcp_out_segs = STAT64(OUT_SEG);
6038 v6->tcp_retrans_segs = STAT64(RXT_SEG);
/*
 * Each array in tp_err_stats is filled with one MIB read of nchan
 * per-channel counters; ofld_no_neigh/ofld_cong_defer share a 2-word
 * read starting at OFD_ARP_DROP.
 */
6046 * t4_tp_get_err_stats - read TP's error MIB counters
6047 * @adap: the adapter
6048 * @st: holds the counter values
6049 * @sleep_ok: if true we may sleep while awaiting command completion
6051 * Returns the values of TP's error counters.
6053 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
6056 int nchan = adap->chip_params->nchan;
6058 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
6061 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
6064 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
6067 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
6068 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
6070 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
6071 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
6073 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6076 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6077 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
6079 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6080 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6082 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
/*
 * Small per-module MIB readers: proxy (nchan words), CPL request and
 * response (nchan words each), and RDMA (2 words from RQE_DFR_PKT).
 */
6087 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
6088 * @adap: the adapter
6089 * @st: holds the counter values
6091 * Returns the values of TP's proxy counters.
6093 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
6096 int nchan = adap->chip_params->nchan;
6098 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
6102 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
6103 * @adap: the adapter
6104 * @st: holds the counter values
6105 * @sleep_ok: if true we may sleep while awaiting command completion
6107 * Returns the values of TP's CPL counters.
6109 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6112 int nchan = adap->chip_params->nchan;
6114 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6116 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6120 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6121 * @adap: the adapter
6122 * @st: holds the counter values
6124 * Returns the values of TP's RDMA counters.
6126 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6129 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
/*
 * FCoE: per-port DDP/drop counters plus a 64-bit octet count whose
 * HI word comes first (val[0] is the high half).  USM: 4-word read
 * starting at USM_PKTS; frames in val[0], 64-bit octets from
 * val[2]/val[3].  NOTE(review): some intermediate lines are elided.
 */
6134 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6135 * @adap: the adapter
6136 * @idx: the port index
6137 * @st: holds the counter values
6138 * @sleep_ok: if true we may sleep while awaiting command completion
6140 * Returns the values of TP's FCoE counters for the selected port.
6142 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6143 struct tp_fcoe_stats *st, bool sleep_ok)
6147 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6150 t4_tp_mib_read(adap, &st->frames_drop, 1,
6151 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6153 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6156 st->octets_ddp = ((u64)val[0] << 32) | val[1];
6160 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6161 * @adap: the adapter
6162 * @st: holds the counter values
6163 * @sleep_ok: if true we may sleep while awaiting command completion
6165 * Returns the values of TP's counters for non-TCP directly-placed packets.
6167 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6172 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6174 st->frames = val[0];
6176 st->octets = ((u64)val[2] << 32) | val[3];
/*
 * MTU table: writing MTUINDEX(0xff) auto-increments the index on each
 * read-back, so the loop just re-reads A_TP_MTU_TABLE.  Congestion
 * table: ROWINDEX(0xffff) with (mtu << 5 | w) selects a cell; the low
 * 13 bits of the read-back are the additive increment.
 */
6180 * t4_read_mtu_tbl - returns the values in the HW path MTU table
6181 * @adap: the adapter
6182 * @mtus: where to store the MTU values
6183 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
6185 * Reads the HW path MTU table.
6187 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6192 for (i = 0; i < NMTUS; ++i) {
6193 t4_write_reg(adap, A_TP_MTU_TABLE,
6194 V_MTUINDEX(0xff) | V_MTUVALUE(i));
6195 v = t4_read_reg(adap, A_TP_MTU_TABLE);
6196 mtus[i] = G_MTUVALUE(v);
6198 mtu_log[i] = G_MTUWIDTH(v);
6203 * t4_read_cong_tbl - reads the congestion control table
6204 * @adap: the adapter
6205 * @incr: where to store the alpha values
6207 * Reads the additive increments programmed into the HW congestion
6210 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6212 unsigned int mtu, w;
6214 for (mtu = 0; mtu < NMTUS; ++mtu)
6215 for (w = 0; w < NCCTRL_WIN; ++w) {
6216 t4_write_reg(adap, A_TP_CCTRL_TABLE,
6217 V_ROWINDEX(0xffff) | (mtu << 5) | w);
6218 incr[mtu][w] = (u16)t4_read_reg(adap,
6219 A_TP_CCTRL_TABLE) & 0x1fff;
6224 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6225 * @adap: the adapter
6226 * @addr: the indirect TP register address
6227 * @mask: specifies the field within the register to modify
6228 * @val: new value for the field
6230 * Sets a field of an indirect TP register to the given value.
6232 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6233 unsigned int mask, unsigned int val)
6235 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6236 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6237 t4_write_reg(adap, A_TP_PIO_DATA, val);
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Both arrays must have
 *	NMTUS (32) entries; entry i corresponds to path-MTU index i.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Additive increments (alpha): grow with the MTU index. */
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	/* Multiplicative-decrease shift amounts (beta). */
	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
6284 /* The minimum additive increment value for the congestion control table */
6285 #define CC_MIN_INCR 2U
6288 * t4_load_mtus - write the MTU and congestion control HW tables
6289 * @adap: the adapter
6290 * @mtus: the values for the MTU table
6291 * @alpha: the values for the congestion control alpha parameter
6292 * @beta: the values for the congestion control beta parameter
6294 * Write the HW MTU table with the supplied MTUs and the high-speed
6295 * congestion control table with the supplied alpha, beta, and MTUs.
6296 * We write the two tables together because the additive increments
6297 * depend on the MTUs.
6299 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6300 const unsigned short *alpha, const unsigned short *beta)
6302 static const unsigned int avg_pkts[NCCTRL_WIN] = {
6303 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
6304 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
6305 28672, 40960, 57344, 81920, 114688, 163840, 229376
6310 for (i = 0; i < NMTUS; ++i) {
6311 unsigned int mtu = mtus[i];
6312 unsigned int log2 = fls(mtu);
6314 if (!(mtu & ((1 << log2) >> 2))) /* round */
6316 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6317 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
6319 for (w = 0; w < NCCTRL_WIN; ++w) {
6322 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
6325 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6326 (w << 16) | (beta[w] << 13) | inc);
6332 * t4_set_pace_tbl - set the pace table
6333 * @adap: the adapter
6334 * @pace_vals: the pace values in microseconds
6335 * @start: index of the first entry in the HW pace table to set
6336 * @n: how many entries to set
6338 * Sets (a subset of the) HW pace table.
6340 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
6341 unsigned int start, unsigned int n)
6343 unsigned int vals[NTX_SCHED], i;
6344 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
6349 /* convert values from us to dack ticks, rounding to closest value */
6350 for (i = 0; i < n; i++, pace_vals++) {
6351 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
6352 if (vals[i] > 0x7ff)
6354 if (*pace_vals && vals[i] == 0)
6357 for (i = 0; i < n; i++, start++)
6358 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
6363 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
6364 * @adap: the adapter
6365 * @kbps: target rate in Kbps
6366 * @sched: the scheduler index
6368 * Configure a Tx HW scheduler for the target rate.
6370 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
6372 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
6373 unsigned int clk = adap->params.vpd.cclk * 1000;
6374 unsigned int selected_cpt = 0, selected_bpt = 0;
6377 kbps *= 125; /* -> bytes */
6378 for (cpt = 1; cpt <= 255; cpt++) {
6380 bpt = (kbps + tps / 2) / tps;
6381 if (bpt > 0 && bpt <= 255) {
6383 delta = v >= kbps ? v - kbps : kbps - v;
6384 if (delta < mindelta) {
6389 } else if (selected_cpt)
6395 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
6396 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
6397 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6399 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
6401 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
6402 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6407 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
6408 * @adap: the adapter
6409 * @sched: the scheduler index
6410 * @ipg: the interpacket delay in tenths of nanoseconds
6412 * Set the interpacket delay for a HW packet rate scheduler.
6414 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
6416 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
6418 /* convert ipg to nearest number of core clocks */
6419 ipg *= core_ticks_per_usec(adap);
6420 ipg = (ipg + 5000) / 10000;
6421 if (ipg > M_TXTIMERSEPQ0)
6424 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
6425 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6427 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
6429 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
6430 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6431 t4_read_reg(adap, A_TP_TM_PIO_DATA);
6436 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6437 * clocks. The formula is
6439 * bytes/s = bytes256 * 256 * ClkFreq / 4096
6441 * which is equivalent to
6443 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6445 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6447 u64 v = (u64)bytes256 * adap->params.vpd.cclk;
6449 return v * 62 + v / 2;
6453 * t4_get_chan_txrate - get the current per channel Tx rates
6454 * @adap: the adapter
6455 * @nic_rate: rates for NIC traffic
6456 * @ofld_rate: rates for offloaded traffic
6458 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
6461 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6465 v = t4_read_reg(adap, A_TP_TX_TRATE);
6466 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6467 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6468 if (adap->chip_params->nchan > 2) {
6469 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6470 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6473 v = t4_read_reg(adap, A_TP_TX_ORATE);
6474 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6475 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6476 if (adap->chip_params->nchan > 2) {
6477 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6478 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6483 * t4_set_trace_filter - configure one of the tracing filters
6484 * @adap: the adapter
6485 * @tp: the desired trace filter parameters
6486 * @idx: which filter to configure
6487 * @enable: whether to enable or disable the filter
6489 * Configures one of the tracing filters available in HW. If @tp is %NULL
6490 * it indicates that the filter is already written in the register and it
6491 * just needs to be enabled or disabled.
6493 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
6494 int idx, int enable)
6496 int i, ofst = idx * 4;
6497 u32 data_reg, mask_reg, cfg;
6498 u32 multitrc = F_TRCMULTIFILTER;
6499 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
6501 if (idx < 0 || idx >= NTRACE)
6504 if (tp == NULL || !enable) {
6505 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
6511 * TODO - After T4 data book is updated, specify the exact
6514 * See T4 data book - MPS section for a complete description
6515 * of the below if..else handling of A_MPS_TRC_CFG register
6518 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6519 if (cfg & F_TRCMULTIFILTER) {
6521 * If multiple tracers are enabled, then maximum
6522 * capture size is 2.5KB (FIFO size of a single channel)
6523 * minus 2 flits for CPL_TRACE_PKT header.
6525 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
6529 * If multiple tracers are disabled, to avoid deadlocks
6530 * maximum packet capture size of 9600 bytes is recommended.
6531 * Also in this mode, only trace0 can be enabled and running.
6534 if (tp->snap_len > 9600 || idx)
6538 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
6539 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
6540 tp->min_len > M_TFMINPKTSIZE)
6543 /* stop the tracer we'll be changing */
6544 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
6546 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
6547 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
6548 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
6550 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6551 t4_write_reg(adap, data_reg, tp->data[i]);
6552 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6554 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6555 V_TFCAPTUREMAX(tp->snap_len) |
6556 V_TFMINPKTSIZE(tp->min_len));
6557 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6558 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
6560 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
6561 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
6567 * t4_get_trace_filter - query one of the tracing filters
6568 * @adap: the adapter
6569 * @tp: the current trace filter parameters
6570 * @idx: which trace filter to query
6571 * @enabled: non-zero if the filter is enabled
6573 * Returns the current settings of one of the HW tracing filters.
6575 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6579 int i, ofst = idx * 4;
6580 u32 data_reg, mask_reg;
6582 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6583 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6586 *enabled = !!(ctla & F_TFEN);
6587 tp->port = G_TFPORT(ctla);
6588 tp->invert = !!(ctla & F_TFINVERTMATCH);
6590 *enabled = !!(ctla & F_T5_TFEN);
6591 tp->port = G_T5_TFPORT(ctla);
6592 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6594 tp->snap_len = G_TFCAPTUREMAX(ctlb);
6595 tp->min_len = G_TFMINPKTSIZE(ctlb);
6596 tp->skip_ofst = G_TFOFFSET(ctla);
6597 tp->skip_len = G_TFLENGTH(ctla);
6599 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6600 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6601 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6603 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6604 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6605 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6610 * t4_pmtx_get_stats - returns the HW stats from PMTX
6611 * @adap: the adapter
6612 * @cnt: where to store the count statistics
6613 * @cycles: where to store the cycle statistics
6615 * Returns performance statistics from PMTX.
6617 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6622 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6623 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6624 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6626 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6628 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6629 A_PM_TX_DBG_DATA, data, 2,
6630 A_PM_TX_DBG_STAT_MSB);
6631 cycles[i] = (((u64)data[0] << 32) | data[1]);
6637 * t4_pmrx_get_stats - returns the HW stats from PMRX
6638 * @adap: the adapter
6639 * @cnt: where to store the count statistics
6640 * @cycles: where to store the cycle statistics
6642 * Returns performance statistics from PMRX.
6644 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6649 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6650 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6651 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6653 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6655 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6656 A_PM_RX_DBG_DATA, data, 2,
6657 A_PM_RX_DBG_STAT_MSB);
6658 cycles[i] = (((u64)data[0] << 32) | data[1]);
6664 * t4_get_mps_bg_map - return the buffer groups associated with a port
6665 * @adap: the adapter
6666 * @idx: the port index
6668 * Returns a bitmap indicating which MPS buffer groups are associated
6669 * with the given port. Bit i is set if buffer group i is used by the
6672 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
6676 if (adap->params.mps_bg_map)
6677 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6679 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6681 return idx == 0 ? 0xf : 0;
6682 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6683 return idx < 2 ? (3 << (2 * idx)) : 0;
6688 * TP RX e-channels associated with the port.
6690 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6692 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6695 return idx == 0 ? 0xf : 0;
6696 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6697 return idx < 2 ? (3 << (2 * idx)) : 0;
6702 * t4_get_port_type_description - return Port Type string description
6703 * @port_type: firmware Port Type enumeration
6705 const char *t4_get_port_type_description(enum fw_port_type port_type)
6707 static const char *const port_type_description[] = {
6732 if (port_type < ARRAY_SIZE(port_type_description))
6733 return port_type_description[port_type];
6738 * t4_get_port_stats_offset - collect port stats relative to a previous
6740 * @adap: The adapter
6742 * @stats: Current stats to fill
6743 * @offset: Previous stats snapshot
6745 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6746 struct port_stats *stats,
6747 struct port_stats *offset)
6752 t4_get_port_stats(adap, idx, stats);
6753 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6754 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6760 * t4_get_port_stats - collect port statistics
6761 * @adap: the adapter
6762 * @idx: the port index
6763 * @p: the stats structure to fill
6765 * Collect statistics related to the given port from HW.
6767 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6769 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6770 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6772 #define GET_STAT(name) \
6773 t4_read_reg64(adap, \
6774 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6775 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6776 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6778 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6779 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6780 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6781 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6782 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6783 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6784 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6785 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6786 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6787 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6788 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6789 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6790 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6791 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6792 p->tx_drop = GET_STAT(TX_PORT_DROP);
6793 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6794 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6795 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6796 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6797 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6798 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6799 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6800 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6802 if (chip_id(adap) >= CHELSIO_T5) {
6803 if (stat_ctl & F_COUNTPAUSESTATTX) {
6804 p->tx_frames -= p->tx_pause;
6805 p->tx_octets -= p->tx_pause * 64;
6807 if (stat_ctl & F_COUNTPAUSEMCTX)
6808 p->tx_mcast_frames -= p->tx_pause;
6811 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6812 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6813 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6814 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6815 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6816 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6817 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6818 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6819 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6820 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6821 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6822 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6823 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6824 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6825 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6826 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6827 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6828 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6829 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6830 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6831 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6832 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6833 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6834 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6835 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6836 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6837 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6839 if (chip_id(adap) >= CHELSIO_T5) {
6840 if (stat_ctl & F_COUNTPAUSESTATRX) {
6841 p->rx_frames -= p->rx_pause;
6842 p->rx_octets -= p->rx_pause * 64;
6844 if (stat_ctl & F_COUNTPAUSEMCRX)
6845 p->rx_mcast_frames -= p->rx_pause;
6848 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6849 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6850 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6851 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6852 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6853 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6854 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6855 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6862 * t4_get_lb_stats - collect loopback port statistics
6863 * @adap: the adapter
6864 * @idx: the loopback port index
6865 * @p: the stats structure to fill
6867 * Return HW statistics for the given loopback port.
6869 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6871 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6873 #define GET_STAT(name) \
6874 t4_read_reg64(adap, \
6876 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6877 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6878 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6880 p->octets = GET_STAT(BYTES);
6881 p->frames = GET_STAT(FRAMES);
6882 p->bcast_frames = GET_STAT(BCAST);
6883 p->mcast_frames = GET_STAT(MCAST);
6884 p->ucast_frames = GET_STAT(UCAST);
6885 p->error_frames = GET_STAT(ERROR);
6887 p->frames_64 = GET_STAT(64B);
6888 p->frames_65_127 = GET_STAT(65B_127B);
6889 p->frames_128_255 = GET_STAT(128B_255B);
6890 p->frames_256_511 = GET_STAT(256B_511B);
6891 p->frames_512_1023 = GET_STAT(512B_1023B);
6892 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6893 p->frames_1519_max = GET_STAT(1519B_MAX);
6894 p->drop = GET_STAT(DROP_FRAMES);
6896 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6897 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6898 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6899 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6900 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6901 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6902 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6903 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6910 * t4_wol_magic_enable - enable/disable magic packet WoL
6911 * @adap: the adapter
6912 * @port: the physical port index
6913 * @addr: MAC address expected in magic packets, %NULL to disable
6915 * Enables/disables magic packet wake-on-LAN for the selected port.
6917 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6920 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6923 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6924 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6925 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6927 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6928 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6929 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6933 t4_write_reg(adap, mag_id_reg_l,
6934 (addr[2] << 24) | (addr[3] << 16) |
6935 (addr[4] << 8) | addr[5]);
6936 t4_write_reg(adap, mag_id_reg_h,
6937 (addr[0] << 8) | addr[1]);
6939 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6940 V_MAGICEN(addr != NULL));
6944 * t4_wol_pat_enable - enable/disable pattern-based WoL
6945 * @adap: the adapter
6946 * @port: the physical port index
6947 * @map: bitmap of which HW pattern filters to set
6948 * @mask0: byte mask for bytes 0-63 of a packet
6949 * @mask1: byte mask for bytes 64-127 of a packet
6950 * @crc: Ethernet CRC for selected bytes
6951 * @enable: enable/disable switch
6953 * Sets the pattern filters indicated in @map to mask out the bytes
6954 * specified in @mask0/@mask1 in received packets and compare the CRC of
6955 * the resulting packet against @crc. If @enable is %true pattern-based
6956 * WoL is enabled, otherwise disabled.
6958 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6959 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6965 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6967 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6970 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6976 #define EPIO_REG(name) \
6977 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6978 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6980 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6981 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6982 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6984 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6988 /* write byte masks */
6989 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6990 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6991 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6992 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6996 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6997 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6998 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6999 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7004 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
7008 /* t4_mk_filtdelwr - create a delete filter WR
7009 * @ftid: the filter ID
7010 * @wr: the filter work request to populate
7011 * @qid: ingress queue to receive the delete notification
7013 * Creates a filter work request to delete the supplied filter. If @qid is
7014 * negative the delete notification is suppressed.
7016 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
7018 memset(wr, 0, sizeof(*wr));
7019 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
7020 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
7021 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
7022 V_FW_FILTER_WR_NOREPLY(qid < 0));
7023 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
7025 wr->rx_chan_rx_rpl_iq =
7026 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * Fill in the opcode/request/read-write word and length of a firmware
 * command structure.  @cmd is the command name without the FW_ prefix and
 * _CMD suffix; @rd_wr is READ or WRITE.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
7036 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
7040 struct fw_ldst_cmd c;
7042 memset(&c, 0, sizeof(c));
7043 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
7044 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7048 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7049 c.u.addrval.addr = cpu_to_be32(addr);
7050 c.u.addrval.val = cpu_to_be32(val);
7052 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7056 * t4_mdio_rd - read a PHY register through MDIO
7057 * @adap: the adapter
7058 * @mbox: mailbox to use for the FW command
7059 * @phy_addr: the PHY address
7060 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7061 * @reg: the register to read
7062 * @valp: where to store the value
7064 * Issues a FW command through the given mailbox to read a PHY register.
7066 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7067 unsigned int mmd, unsigned int reg, unsigned int *valp)
7071 struct fw_ldst_cmd c;
7073 memset(&c, 0, sizeof(c));
7074 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7075 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7076 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7078 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7079 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7080 V_FW_LDST_CMD_MMD(mmd));
7081 c.u.mdio.raddr = cpu_to_be16(reg);
7083 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7085 *valp = be16_to_cpu(c.u.mdio.rval);
7090 * t4_mdio_wr - write a PHY register through MDIO
7091 * @adap: the adapter
7092 * @mbox: mailbox to use for the FW command
7093 * @phy_addr: the PHY address
7094 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7095 * @reg: the register to write
7096 * @valp: value to write
7098 * Issues a FW command through the given mailbox to write a PHY register.
7100 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7101 unsigned int mmd, unsigned int reg, unsigned int val)
7104 struct fw_ldst_cmd c;
7106 memset(&c, 0, sizeof(c));
7107 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7108 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7109 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7111 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7112 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7113 V_FW_LDST_CMD_MMD(mmd));
7114 c.u.mdio.raddr = cpu_to_be16(reg);
7115 c.u.mdio.rval = cpu_to_be16(val);
7117 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7122 * t4_sge_decode_idma_state - decode the idma state
7123 * @adap: the adapter
7124 * @state: the state idma is stuck in
7126 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7128 static const char * const t4_decode[] = {
7130 "IDMA_PUSH_MORE_CPL_FIFO",
7131 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7133 "IDMA_PHYSADDR_SEND_PCIEHDR",
7134 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7135 "IDMA_PHYSADDR_SEND_PAYLOAD",
7136 "IDMA_SEND_FIFO_TO_IMSG",
7137 "IDMA_FL_REQ_DATA_FL_PREP",
7138 "IDMA_FL_REQ_DATA_FL",
7140 "IDMA_FL_H_REQ_HEADER_FL",
7141 "IDMA_FL_H_SEND_PCIEHDR",
7142 "IDMA_FL_H_PUSH_CPL_FIFO",
7143 "IDMA_FL_H_SEND_CPL",
7144 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7145 "IDMA_FL_H_SEND_IP_HDR",
7146 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7147 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7148 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7149 "IDMA_FL_D_SEND_PCIEHDR",
7150 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7151 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7152 "IDMA_FL_SEND_PCIEHDR",
7153 "IDMA_FL_PUSH_CPL_FIFO",
7155 "IDMA_FL_SEND_PAYLOAD_FIRST",
7156 "IDMA_FL_SEND_PAYLOAD",
7157 "IDMA_FL_REQ_NEXT_DATA_FL",
7158 "IDMA_FL_SEND_NEXT_PCIEHDR",
7159 "IDMA_FL_SEND_PADDING",
7160 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7161 "IDMA_FL_SEND_FIFO_TO_IMSG",
7162 "IDMA_FL_REQ_DATAFL_DONE",
7163 "IDMA_FL_REQ_HEADERFL_DONE",
7165 static const char * const t5_decode[] = {
7168 "IDMA_PUSH_MORE_CPL_FIFO",
7169 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7170 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7171 "IDMA_PHYSADDR_SEND_PCIEHDR",
7172 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7173 "IDMA_PHYSADDR_SEND_PAYLOAD",
7174 "IDMA_SEND_FIFO_TO_IMSG",
7175 "IDMA_FL_REQ_DATA_FL",
7177 "IDMA_FL_DROP_SEND_INC",
7178 "IDMA_FL_H_REQ_HEADER_FL",
7179 "IDMA_FL_H_SEND_PCIEHDR",
7180 "IDMA_FL_H_PUSH_CPL_FIFO",
7181 "IDMA_FL_H_SEND_CPL",
7182 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7183 "IDMA_FL_H_SEND_IP_HDR",
7184 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7185 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7186 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7187 "IDMA_FL_D_SEND_PCIEHDR",
7188 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7189 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7190 "IDMA_FL_SEND_PCIEHDR",
7191 "IDMA_FL_PUSH_CPL_FIFO",
7193 "IDMA_FL_SEND_PAYLOAD_FIRST",
7194 "IDMA_FL_SEND_PAYLOAD",
7195 "IDMA_FL_REQ_NEXT_DATA_FL",
7196 "IDMA_FL_SEND_NEXT_PCIEHDR",
7197 "IDMA_FL_SEND_PADDING",
7198 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7200 static const char * const t6_decode[] = {
7202 "IDMA_PUSH_MORE_CPL_FIFO",
7203 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7204 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7205 "IDMA_PHYSADDR_SEND_PCIEHDR",
7206 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7207 "IDMA_PHYSADDR_SEND_PAYLOAD",
7208 "IDMA_FL_REQ_DATA_FL",
7210 "IDMA_FL_DROP_SEND_INC",
7211 "IDMA_FL_H_REQ_HEADER_FL",
7212 "IDMA_FL_H_SEND_PCIEHDR",
7213 "IDMA_FL_H_PUSH_CPL_FIFO",
7214 "IDMA_FL_H_SEND_CPL",
7215 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7216 "IDMA_FL_H_SEND_IP_HDR",
7217 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7218 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7219 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7220 "IDMA_FL_D_SEND_PCIEHDR",
7221 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7222 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7223 "IDMA_FL_SEND_PCIEHDR",
7224 "IDMA_FL_PUSH_CPL_FIFO",
7226 "IDMA_FL_SEND_PAYLOAD_FIRST",
7227 "IDMA_FL_SEND_PAYLOAD",
7228 "IDMA_FL_REQ_NEXT_DATA_FL",
7229 "IDMA_FL_SEND_NEXT_PCIEHDR",
7230 "IDMA_FL_SEND_PADDING",
7231 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7233 static const u32 sge_regs[] = {
7234 A_SGE_DEBUG_DATA_LOW_INDEX_2,
7235 A_SGE_DEBUG_DATA_LOW_INDEX_3,
7236 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7238 const char * const *sge_idma_decode;
7239 int sge_idma_decode_nstates;
7241 unsigned int chip_version = chip_id(adapter);
7243 /* Select the right set of decode strings to dump depending on the
7244 * adapter chip type.
7246 switch (chip_version) {
7248 sge_idma_decode = (const char * const *)t4_decode;
7249 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7253 sge_idma_decode = (const char * const *)t5_decode;
7254 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7258 sge_idma_decode = (const char * const *)t6_decode;
7259 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7263 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
7267 if (state < sge_idma_decode_nstates)
7268 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7270 CH_WARN(adapter, "idma state %d unknown\n", state);
7272 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7273 CH_WARN(adapter, "SGE register %#x value %#x\n",
7274 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7278 * t4_sge_ctxt_flush - flush the SGE context cache
7279 * @adap: the adapter
7280 * @mbox: mailbox to use for the FW command
7282 * Issues a FW command through the given mailbox to flush the
7283 * SGE context cache.
7285 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7289 struct fw_ldst_cmd c;
7291 memset(&c, 0, sizeof(c));
7292 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
7293 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7294 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7296 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7297 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7299 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7304 * t4_fw_hello - establish communication with FW
7305 * @adap: the adapter
7306 * @mbox: mailbox to use for the FW command
7307 * @evt_mbox: mailbox to receive async FW events
7308 * @master: specifies the caller's willingness to be the device master
7309 * @state: returns the current device state (if non-NULL)
7311 * Issues a command to establish communication with FW. Returns either
7312 * an error (negative integer) or the mailbox of the Master PF.
7314 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7315 enum dev_master master, enum dev_state *state)
7318 struct fw_hello_cmd c;
7320 unsigned int master_mbox;
7321 int retries = FW_CMD_HELLO_RETRIES;
7324 memset(&c, 0, sizeof(c));
7325 INIT_CMD(c, HELLO, WRITE);
/*
 * Encode our mastership preference: MASTERDIS if we can't be master,
 * MASTERFORCE (plus our own mailbox as MBMASTER) if we must be.
 */
7326 c.err_to_clearinit = cpu_to_be32(
7327 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7328 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7329 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7330 mbox : M_FW_HELLO_CMD_MBMASTER) |
7331 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7332 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7333 F_FW_HELLO_CMD_CLEARINIT);
7336 * Issue the HELLO command to the firmware. If it's not successful
7337 * but indicates that we got a "busy" or "timeout" condition, retry
7338 * the HELLO until we exhaust our retry limit. If we do exceed our
7339 * retry limit, check to see if the firmware left us any error
7340 * information and report that if so ...
7342 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7343 if (ret != FW_SUCCESS) {
7344 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7346 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7347 t4_report_fw_error(adap);
/* Decode the reply: current device state and the chosen master mailbox. */
7351 v = be32_to_cpu(c.err_to_clearinit);
7352 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7354 if (v & F_FW_HELLO_CMD_ERR)
7355 *state = DEV_STATE_ERR;
7356 else if (v & F_FW_HELLO_CMD_INIT)
7357 *state = DEV_STATE_INIT;
7359 *state = DEV_STATE_UNINIT;
7363 * If we're not the Master PF then we need to wait around for the
7364 * Master PF Driver to finish setting up the adapter.
7366 * Note that we also do this wait if we're a non-Master-capable PF and
7367 * there is no current Master PF; a Master PF may show up momentarily
7368 * and we wouldn't want to fail pointlessly. (This can happen when an
7369 * OS loads lots of different drivers rapidly at the same time). In
7370 * this case, the Master PF returned by the firmware will be
7371 * M_PCIE_FW_MASTER so the test below will work ...
7373 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7374 master_mbox != mbox) {
7375 int waiting = FW_CMD_HELLO_TIMEOUT;
7378 * Wait for the firmware to either indicate an error or
7379 * initialized state. If we see either of these we bail out
7380 * and report the issue to the caller. If we exhaust the
7381 * "hello timeout" and we haven't exhausted our retries, try
7382 * again. Otherwise bail with a timeout error.
7391 * If neither Error nor Initialized are indicated
7392 * by the firmware keep waiting till we exhaust our
7393 * timeout ... and then retry if we haven't exhausted
7396 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7397 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7408 * We either have an Error or Initialized condition
7409 * report errors preferentially.
7412 if (pcie_fw & F_PCIE_FW_ERR)
7413 *state = DEV_STATE_ERR;
7414 else if (pcie_fw & F_PCIE_FW_INIT)
7415 *state = DEV_STATE_INIT;
7419 * If we arrived before a Master PF was selected and
7420 * there's not a valid Master PF, grab its identity
7423 if (master_mbox == M_PCIE_FW_MASTER &&
7424 (pcie_fw & F_PCIE_FW_MASTER_VLD))
7425 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7434 * t4_fw_bye - end communication with FW
7435 * @adap: the adapter
7436 * @mbox: mailbox to use for the FW command
7438 * Issues a command to terminate communication with FW.
7440 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7442 struct fw_bye_cmd c;
7444 memset(&c, 0, sizeof(c));
7445 INIT_CMD(c, BYE, WRITE);
/* Fire-and-check: no reply payload is needed, so the reply buffer is NULL. */
7446 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7450 * t4_fw_reset - issue a reset to FW
7451 * @adap: the adapter
7452 * @mbox: mailbox to use for the FW command
7453 * @reset: specifies the type of reset to perform
7455 * Issues a reset command of the specified type to FW.
7457 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7459 struct fw_reset_cmd c;
7461 memset(&c, 0, sizeof(c));
7462 INIT_CMD(c, RESET, WRITE);
/* @reset selects the reset type (e.g. PIORST/PIORSTMODE bits). */
7463 c.val = cpu_to_be32(reset);
7464 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7468 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7469 * @adap: the adapter
7470 * @mbox: mailbox to use for the FW RESET command (if desired)
7471 * @force: force uP into RESET even if FW RESET command fails
7473 * Issues a RESET command to firmware (if desired) with a HALT indication
7474 * and then puts the microprocessor into RESET state. The RESET command
7475 * will only be issued if a legitimate mailbox is provided (mbox <=
7476 * M_PCIE_FW_MASTER).
7478 * This is generally used in order for the host to safely manipulate the
7479 * adapter without fear of conflicting with whatever the firmware might
7480 * be doing. The only way out of this state is to RESTART the firmware
7483 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7488 * If a legitimate mailbox is provided, issue a RESET command
7489 * with a HALT indication.
7491 if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
7492 struct fw_reset_cmd c;
7494 memset(&c, 0, sizeof(c));
7495 INIT_CMD(c, RESET, WRITE);
7496 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7497 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7498 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7502 * Normally we won't complete the operation if the firmware RESET
7503 * command fails but if our caller insists we'll go ahead and put the
7504 * uP into RESET. This can be useful if the firmware is hung or even
7505 * missing ... We'll have to take the risk of putting the uP into
7506 * RESET without the cooperation of firmware in that case.
7508 * We also force the firmware's HALT flag to be on in case we bypassed
7509 * the firmware RESET command above or we're dealing with old firmware
7510 * which doesn't have the HALT capability. This will serve as a flag
7511 * for the incoming firmware to know that it's coming out of a HALT
7512 * rather than a RESET ... if it's new enough to understand that ...
7514 if (ret == 0 || force) {
/* UPCRST holds the microprocessor in reset; PCIE_FW_HALT marks the HALT. */
7515 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7516 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7521 * And we always return the result of the firmware RESET command
7522 * even when we force the uP into RESET ...
7528 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7529 * @adap: the adapter
7531 * Restart firmware previously halted by t4_fw_halt(). On successful
7532 * return the previous PF Master remains as the new PF Master and there
7533 * is no need to issue a new HELLO command, etc.
7535 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
/* Release the uP from reset, then poll for the firmware to clear HALT. */
7539 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7540 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7541 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7551 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7552 * @adap: the adapter
7553 * @mbox: mailbox to use for the FW RESET command (if desired)
7554 * @fw_data: the firmware image to write
7556 * @force: force upgrade even if firmware doesn't cooperate
7558 * Perform all of the steps necessary for upgrading an adapter's
7559 * firmware image. Normally this requires the cooperation of the
7560 * existing firmware in order to halt all existing activities
7561 * but if an invalid mailbox token is passed in we skip that step
7562 * (though we'll still put the adapter microprocessor into RESET in
7565 * On successful return the new firmware will have been loaded and
7566 * the adapter will have been fully RESET losing all previous setup
7567 * state. On unsuccessful return the adapter may be completely hosed ...
7568 * positive errno indicates that the adapter is ~probably~ intact, a
7569 * negative errno indicates that things are looking bad ...
7571 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7572 const u8 *fw_data, unsigned int size, int force)
7574 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* A bootstrap image is loaded but the uP is not restarted afterwards. */
7575 unsigned int bootstrap =
7576 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
/* Refuse images built for a different chip. */
7579 if (!t4_fw_matches_chip(adap, fw_hdr))
/* Halt the running firmware first; a failure is fatal unless @force. */
7583 ret = t4_fw_halt(adap, mbox, force);
7584 if (ret < 0 && !force)
7588 ret = t4_load_fw(adap, fw_data, size);
7589 if (ret < 0 || bootstrap)
7592 return t4_fw_restart(adap, mbox);
7596 * t4_fw_initialize - ask FW to initialize the device
7597 * @adap: the adapter
7598 * @mbox: mailbox to use for the FW command
7600 * Issues a command to FW to partially initialize the device. This
7601 * performs initialization that generally doesn't depend on user input.
7603 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7605 struct fw_initialize_cmd c;
7607 memset(&c, 0, sizeof(c));
7608 INIT_CMD(c, INITIALIZE, WRITE);
7609 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7613 * t4_query_params_rw - query FW or device parameters
7614 * @adap: the adapter
7615 * @mbox: mailbox to use for the FW command
7618 * @nparams: the number of parameters
7619 * @params: the parameter names
7620 * @val: the parameter values
7621 * @rw: Write and read flag
7623 * Reads the value of FW or device parameters. Up to 7 parameters can be
7626 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7627 unsigned int vf, unsigned int nparams, const u32 *params,
7631 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command's parameter array. */
7632 __be32 *p = &c.param[0].mnem;
7637 memset(&c, 0, sizeof(c));
7638 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7639 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7640 V_FW_PARAMS_CMD_PFN(pf) |
7641 V_FW_PARAMS_CMD_VFN(vf));
7642 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Load each parameter mnemonic (and, for rw, its value) into the command. */
7644 for (i = 0; i < nparams; i++) {
7645 *p++ = cpu_to_be32(*params++);
7647 *p = cpu_to_be32(*(val + i));
7651 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success copy the returned values back out; vals sit at every 2nd u32. */
7653 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7654 *val++ = be32_to_cpu(*p);
/* Convenience wrapper: query parameters without the read-write flag. */
7658 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7659 unsigned int vf, unsigned int nparams, const u32 *params,
7662 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
7666 * t4_set_params_timeout - sets FW or device parameters
7667 * @adap: the adapter
7668 * @mbox: mailbox to use for the FW command
7671 * @nparams: the number of parameters
7672 * @params: the parameter names
7673 * @val: the parameter values
7674 * @timeout: the timeout time
7676 * Sets the value of FW or device parameters. Up to 7 parameters can be
7677 * specified at once.
7679 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7680 unsigned int pf, unsigned int vf,
7681 unsigned int nparams, const u32 *params,
7682 const u32 *val, int timeout)
7684 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command's parameter array. */
7685 __be32 *p = &c.param[0].mnem;
7690 memset(&c, 0, sizeof(c));
7691 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7692 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7693 V_FW_PARAMS_CMD_PFN(pf) |
7694 V_FW_PARAMS_CMD_VFN(vf));
7695 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Store each (mnemonic, value) pair consecutively. */
7698 *p++ = cpu_to_be32(*params++);
7699 *p++ = cpu_to_be32(*val++);
/* No reply payload needed; honor the caller-supplied mailbox timeout. */
7702 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7706 * t4_set_params - sets FW or device parameters
7707 * @adap: the adapter
7708 * @mbox: mailbox to use for the FW command
7711 * @nparams: the number of parameters
7712 * @params: the parameter names
7713 * @val: the parameter values
7715 * Sets the value of FW or device parameters. Up to 7 parameters can be
7716 * specified at once.
7718 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7719 unsigned int vf, unsigned int nparams, const u32 *params,
/* Convenience wrapper: set parameters with the default mailbox timeout. */
7722 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7723 FW_CMD_MAX_TIMEOUT);
7727 * t4_cfg_pfvf - configure PF/VF resource limits
7728 * @adap: the adapter
7729 * @mbox: mailbox to use for the FW command
7730 * @pf: the PF being configured
7731 * @vf: the VF being configured
7732 * @txq: the max number of egress queues
7733 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7734 * @rxqi: the max number of interrupt-capable ingress queues
7735 * @rxq: the max number of interruptless ingress queues
7736 * @tc: the PCI traffic class
7737 * @vi: the max number of virtual interfaces
7738 * @cmask: the channel access rights mask for the PF/VF
7739 * @pmask: the port access rights mask for the PF/VF
7740 * @nexact: the maximum number of exact MPS filters
7741 * @rcaps: read capabilities
7742 * @wxcaps: write/execute capabilities
7744 * Configures resource limits and capabilities for a physical or virtual
7747 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7748 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7749 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7750 unsigned int vi, unsigned int cmask, unsigned int pmask,
7751 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7753 struct fw_pfvf_cmd c;
7755 memset(&c, 0, sizeof(c));
7756 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7757 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7758 V_FW_PFVF_CMD_VFN(vf));
7759 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (rxqi) and interruptless (rxq). */
7760 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7761 V_FW_PFVF_CMD_NIQ(rxq));
/* Channel/port access masks and total egress queue budget. */
7762 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7763 V_FW_PFVF_CMD_PMASK(pmask) |
7764 V_FW_PFVF_CMD_NEQ(txq));
7765 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7766 V_FW_PFVF_CMD_NVI(vi) |
7767 V_FW_PFVF_CMD_NEXACTF(nexact));
7768 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7769 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7770 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7771 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7775 * t4_alloc_vi_func - allocate a virtual interface
7776 * @adap: the adapter
7777 * @mbox: mailbox to use for the FW command
7778 * @port: physical port associated with the VI
7779 * @pf: the PF owning the VI
7780 * @vf: the VF owning the VI
7781 * @nmac: number of MAC addresses needed (1 to 5)
7782 * @mac: the MAC addresses of the VI
7783 * @rss_size: size of RSS table slice associated with this VI
7784 * @portfunc: which Port Application Function MAC Address is desired
7785 * @idstype: Intrusion Detection Type
7787 * Allocates a virtual interface for the given physical port. If @mac is
7788 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7789 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7790 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7791 * stored consecutively so the space needed is @nmac * 6 bytes.
7792 * Returns a negative error number or the non-negative VI id.
7794 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7795 unsigned int port, unsigned int pf, unsigned int vf,
7796 unsigned int nmac, u8 *mac, u16 *rss_size,
7797 unsigned int portfunc, unsigned int idstype)
7802 memset(&c, 0, sizeof(c));
7803 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7804 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7805 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7806 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7807 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7808 V_FW_VI_CMD_FUNC(portfunc));
7809 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
/* NORSS: caller passed a NULL rss_size, so don't assign an RSS slice. */
7812 c.norss_rsssize = F_FW_VI_CMD_NORSS;
7814 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/*
 * Copy out the FW-assigned MAC addresses. Fall-through by address count:
 * nmac0..nmac3 hold the 2nd..5th addresses, 6 bytes apart in @mac.
 */
7819 memcpy(mac, c.mac, sizeof(c.mac));
7822 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7824 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7826 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7828 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7832 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
/* Return the newly allocated VI id extracted from the reply. */
7833 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7837 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7838 * @adap: the adapter
7839 * @mbox: mailbox to use for the FW command
7840 * @port: physical port associated with the VI
7841 * @pf: the PF owning the VI
7842 * @vf: the VF owning the VI
7843 * @nmac: number of MAC addresses needed (1 to 5)
7844 * @mac: the MAC addresses of the VI
7845 * @rss_size: size of RSS table slice associated with this VI
7847 * Backwards-compatible convenience routine to allocate a Virtual
7848 * Interface with an Ethernet Port Application Function and Intrusion
7849 * Detection System disabled.
7851 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7852 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
/* Delegate to the general allocator with default portfunc/idstype. */
7855 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7860 * t4_free_vi - free a virtual interface
7861 * @adap: the adapter
7862 * @mbox: mailbox to use for the FW command
7863 * @pf: the PF owning the VI
7864 * @vf: the VF owning the VI
7865 * @viid: virtual interface identifier
7867 * Free a previously allocated virtual interface.
7869 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7870 unsigned int vf, unsigned int viid)
7874 memset(&c, 0, sizeof(c));
7875 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7878 V_FW_VI_CMD_PFN(pf) |
7879 V_FW_VI_CMD_VFN(vf));
/* FREE (rather than ALLOC) releases the VI identified below. */
7880 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7881 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7883 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7887 * t4_set_rxmode - set Rx properties of a virtual interface
7888 * @adap: the adapter
7889 * @mbox: mailbox to use for the FW command
7891 * @mtu: the new MTU or -1
7892 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7893 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7894 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7895 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7896 * @sleep_ok: if true we may sleep while awaiting command completion
7898 * Sets Rx properties of a virtual interface.
7900 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7901 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7904 struct fw_vi_rxmode_cmd c;
7906 /* convert to FW values: -1 ("no change") maps to each field's mask value */
7908 mtu = M_FW_VI_RXMODE_CMD_MTU;
7910 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7912 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7914 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7916 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7918 memset(&c, 0, sizeof(c));
7919 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7920 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7921 V_FW_VI_RXMODE_CMD_VIID(viid));
7922 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7924 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7925 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7926 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7927 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7928 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* sleep_ok selects between a sleeping or spinning mailbox wait. */
7929 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7933 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7934 * @adap: the adapter
7935 * @mbox: mailbox to use for the FW command
7937 * @free: if true any existing filters for this VI id are first removed
7938 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7939 * @addr: the MAC address(es)
7940 * @idx: where to store the index of each allocated filter
7941 * @hash: pointer to hash address filter bitmap
7942 * @sleep_ok: call is allowed to sleep
7944 * Allocates an exact-match filter for each of the supplied addresses and
7945 * sets it to the corresponding address. If @idx is not %NULL it should
7946 * have at least @naddr entries, each of which will be set to the index of
7947 * the filter allocated for the corresponding MAC address. If a filter
7948 * could not be allocated for an address its index is set to 0xffff.
7949 * If @hash is not %NULL addresses that fail to allocate an exact filter
7950 * are hashed and update the hash filter bitmap pointed at by @hash.
7952 * Returns a negative error number or the number of filters allocated.
7954 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7955 unsigned int viid, bool free, unsigned int naddr,
7956 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7958 int offset, ret = 0;
7959 struct fw_vi_mac_cmd c;
7960 unsigned int nfilters = 0;
7961 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7962 unsigned int rem = naddr;
/* Can't ask for more filters than the MPS TCAM can hold. */
7964 if (naddr > max_naddr)
/* Submit the addresses in command-sized batches of up to exact[] entries. */
7967 for (offset = 0; offset < naddr ; /**/) {
7968 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7970 : ARRAY_SIZE(c.u.exact));
7971 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7972 u.exact[fw_naddr]), 16);
7973 struct fw_vi_mac_exact *p;
7976 memset(&c, 0, sizeof(c));
7977 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7980 V_FW_CMD_EXEC(free) |
7981 V_FW_VI_MAC_CMD_VIID(viid));
7982 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7983 V_FW_CMD_LEN16(len16));
/* FW_VI_MAC_ADD_MAC asks firmware to pick a free filter index. */
7985 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7987 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7988 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7989 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7993 * It's okay if we run out of space in our MAC address arena.
7994 * Some of the addresses we submit may get stored so we need
7995 * to run through the reply to see what the results were ...
7997 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7998 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: record each allocated index, hash the failures. */
8001 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8002 u16 index = G_FW_VI_MAC_CMD_IDX(
8003 be16_to_cpu(p->valid_to_idx));
8006 idx[offset+i] = (index >= max_naddr
8009 if (index < max_naddr)
/* Exact filter unavailable: fold the address into the hash bitmap. */
8012 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* -FW_ENOMEM is a partial success; report how many filters were placed. */
8020 if (ret == 0 || ret == -FW_ENOMEM)
8026 * t4_change_mac - modifies the exact-match filter for a MAC address
8027 * @adap: the adapter
8028 * @mbox: mailbox to use for the FW command
8030 * @idx: index of existing filter for old value of MAC address, or -1
8031 * @addr: the new MAC address value
8032 * @persist: whether a new MAC allocation should be persistent
8033 * @add_smt: if true also add the address to the HW SMT
8035 * Modifies an exact-match filter and sets it to the new MAC address if
8036 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
8037 * latter case the address is added persistently if @persist is %true.
8039 * Note that in general it is not possible to modify the value of a given
8040 * filter so the generic way to modify an address filter is to free the one
8041 * being used by the old address value and allocate a new filter for the
8042 * new address value.
8044 * Returns a negative error number or the index of the filter with the new
8045 * MAC value. Note that this index may differ from @idx.
8047 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8048 int idx, const u8 *addr, bool persist, bool add_smt)
8051 struct fw_vi_mac_cmd c;
8052 struct fw_vi_mac_exact *p = c.u.exact;
8053 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
8055 if (idx < 0) /* new allocation */
8056 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
/* Optionally mirror the address into the SMT as well as the MPS TCAM. */
8057 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8059 memset(&c, 0, sizeof(c));
8060 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8061 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8062 V_FW_VI_MAC_CMD_VIID(viid));
8063 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
8064 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8065 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
8066 V_FW_VI_MAC_CMD_IDX(idx));
8067 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8069 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success extract the filter index actually used from the reply. */
8071 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
/* An out-of-range index means the firmware could not place the filter. */
8072 if (ret >= max_mac_addr)
8079 * t4_set_addr_hash - program the MAC inexact-match hash filter
8080 * @adap: the adapter
8081 * @mbox: mailbox to use for the FW command
8083 * @ucast: whether the hash filter should also match unicast addresses
8084 * @vec: the value to be written to the hash filter
8085 * @sleep_ok: call is allowed to sleep
8087 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8089 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8090 bool ucast, u64 vec, bool sleep_ok)
8092 struct fw_vi_mac_cmd c;
8095 memset(&c, 0, sizeof(c));
8096 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8097 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8098 V_FW_VI_ENABLE_CMD_VIID(viid));
/* HASHVEC entry type programs the 64-bit inexact-match hash filter. */
8099 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8100 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8101 c.freemacs_to_len16 = cpu_to_be32(val);
8102 c.u.hash.hashvec = cpu_to_be64(vec);
8103 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8107 * t4_enable_vi_params - enable/disable a virtual interface
8108 * @adap: the adapter
8109 * @mbox: mailbox to use for the FW command
8111 * @rx_en: 1=enable Rx, 0=disable Rx
8112 * @tx_en: 1=enable Tx, 0=disable Tx
8113 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8115 * Enables/disables a virtual interface. Note that setting DCB Enable
8116 * only makes sense when enabling a Virtual Interface ...
8118 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8119 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8121 struct fw_vi_enable_cmd c;
8123 memset(&c, 0, sizeof(c));
8124 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8125 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8126 V_FW_VI_ENABLE_CMD_VIID(viid));
/* IEN = ingress (Rx) enable, EEN = egress (Tx) enable. */
8127 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8128 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8129 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
/* Non-sleeping mailbox variant. */
8131 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8135 * t4_enable_vi - enable/disable a virtual interface
8136 * @adap: the adapter
8137 * @mbox: mailbox to use for the FW command
8139 * @rx_en: 1=enable Rx, 0=disable Rx
8140 * @tx_en: 1=enable Tx, 0=disable Tx
8142 * Enables/disables a virtual interface. Note that setting DCB Enable
8143 * only makes sense when enabling a Virtual Interface ...
8145 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8146 bool rx_en, bool tx_en)
/* Convenience wrapper: enable/disable a VI with DCB notifications off. */
8148 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8152 * t4_identify_port - identify a VI's port by blinking its LED
8153 * @adap: the adapter
8154 * @mbox: mailbox to use for the FW command
8156 * @nblinks: how many times to blink LED at 2.5 Hz
8158 * Identifies a VI's port by blinking its LED.
8160 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8161 unsigned int nblinks)
8163 struct fw_vi_enable_cmd c;
8165 memset(&c, 0, sizeof(c));
8166 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8167 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8168 V_FW_VI_ENABLE_CMD_VIID(viid));
/* LED flag turns the ENABLE command into a "blink the port LED" request. */
8169 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8170 c.blinkdur = cpu_to_be16(nblinks);
8171 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8175 * t4_iq_stop - stop an ingress queue and its FLs
8176 * @adap: the adapter
8177 * @mbox: mailbox to use for the FW command
8178 * @pf: the PF owning the queues
8179 * @vf: the VF owning the queues
8180 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8181 * @iqid: ingress queue id
8182 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8183 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8185 * Stops an ingress queue and its associated FLs, if any. This causes
8186 * any current or future data/messages destined for these queues to be
8189 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8190 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8191 unsigned int fl0id, unsigned int fl1id)
8195 memset(&c, 0, sizeof(c));
8196 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8197 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8198 V_FW_IQ_CMD_VFN(vf));
/* IQSTOP stops the queue (and its FLs) without freeing its resources. */
8199 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8200 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8201 c.iqid = cpu_to_be16(iqid);
8202 c.fl0id = cpu_to_be16(fl0id);
8203 c.fl1id = cpu_to_be16(fl1id);
8204 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8208 * t4_iq_free - free an ingress queue and its FLs
8209 * @adap: the adapter
8210 * @mbox: mailbox to use for the FW command
8211 * @pf: the PF owning the queues
8212 * @vf: the VF owning the queues
8213 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8214 * @iqid: ingress queue id
8215 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8216 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8218 * Frees an ingress queue and its associated FLs, if any.
8220 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8221 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8222 unsigned int fl0id, unsigned int fl1id)
8226 memset(&c, 0, sizeof(c));
8227 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8228 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8229 V_FW_IQ_CMD_VFN(vf));
/* FREE (vs. IQSTOP in t4_iq_stop) releases the queue's resources. */
8230 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8231 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8232 c.iqid = cpu_to_be16(iqid);
8233 c.fl0id = cpu_to_be16(fl0id);
8234 c.fl1id = cpu_to_be16(fl1id);
8235 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8239 * t4_eth_eq_free - free an Ethernet egress queue
8240 * @adap: the adapter
8241 * @mbox: mailbox to use for the FW command
8242 * @pf: the PF owning the queue
8243 * @vf: the VF owning the queue
8244 * @eqid: egress queue id
8246 * Frees an Ethernet egress queue.
8248 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8249 unsigned int vf, unsigned int eqid)
8251 struct fw_eq_eth_cmd c;
8253 memset(&c, 0, sizeof(c));
8254 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8255 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8256 V_FW_EQ_ETH_CMD_PFN(pf) |
8257 V_FW_EQ_ETH_CMD_VFN(vf));
8258 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8259 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8264 * t4_ctrl_eq_free - free a control egress queue
8265 * @adap: the adapter
8266 * @mbox: mailbox to use for the FW command
8267 * @pf: the PF owning the queue
8268 * @vf: the VF owning the queue
8269 * @eqid: egress queue id
8271 * Frees a control egress queue.
8273 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8274 unsigned int vf, unsigned int eqid)
8276 struct fw_eq_ctrl_cmd c;
8278 memset(&c, 0, sizeof(c));
8279 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8280 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8281 V_FW_EQ_CTRL_CMD_PFN(pf) |
8282 V_FW_EQ_CTRL_CMD_VFN(vf));
8283 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8284 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8285 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8289 * t4_ofld_eq_free - free an offload egress queue
8290 * @adap: the adapter
8291 * @mbox: mailbox to use for the FW command
8292 * @pf: the PF owning the queue
8293 * @vf: the VF owning the queue
8294 * @eqid: egress queue id
8296 * Frees an offload egress queue.
8298 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8299 unsigned int vf, unsigned int eqid)
8301 struct fw_eq_ofld_cmd c;
8303 memset(&c, 0, sizeof(c));
8304 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
8305 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8306 V_FW_EQ_OFLD_CMD_PFN(pf) |
8307 V_FW_EQ_OFLD_CMD_VFN(vf));
8308 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
8309 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
8310 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8314 * t4_link_down_rc_str - return a string for a Link Down Reason Code
8315 * @link_down_rc: Link Down Reason Code
8317 * Returns a string representation of the Link Down Reason Code.
8319 const char *t4_link_down_rc_str(unsigned char link_down_rc)
/* Table indexed directly by the firmware's Link Down Reason Code. */
8321 static const char *reason[] = {
8324 "Auto-negotiation Failure",
8326 "Insufficient Airflow",
8327 "Unable To Determine Reason",
8328 "No RX Signal Detected",
/* Out-of-range codes get a generic fallback rather than a table overrun. */
8332 if (link_down_rc >= ARRAY_SIZE(reason))
8333 return "Bad Reason Code";
8335 return reason[link_down_rc];
8339 * Return the highest speed set in the port capabilities, in Mb/s.
8341 unsigned int fwcap_to_speed(uint32_t caps)
/* Check speeds from fastest to slowest; first match wins. */
8343 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8345 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8349 TEST_SPEED_RETURN(400G, 400000);
8350 TEST_SPEED_RETURN(200G, 200000);
8351 TEST_SPEED_RETURN(100G, 100000);
8352 TEST_SPEED_RETURN(50G, 50000);
8353 TEST_SPEED_RETURN(40G, 40000);
8354 TEST_SPEED_RETURN(25G, 25000);
8355 TEST_SPEED_RETURN(10G, 10000);
8356 TEST_SPEED_RETURN(1G, 1000);
8357 TEST_SPEED_RETURN(100M, 100);
8359 #undef TEST_SPEED_RETURN
8365 * Return the port capabilities bit for the given speed, which is in Mb/s.
8367 uint32_t speed_to_fwcap(unsigned int speed)
/* Inverse of fwcap_to_speed(): map an exact Mb/s value to its cap bit. */
8369 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8371 if (speed == __speed) \
8372 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8375 TEST_SPEED_RETURN(400G, 400000);
8376 TEST_SPEED_RETURN(200G, 200000);
8377 TEST_SPEED_RETURN(100G, 100000);
8378 TEST_SPEED_RETURN(50G, 50000);
8379 TEST_SPEED_RETURN(40G, 40000);
8380 TEST_SPEED_RETURN(25G, 25000);
8381 TEST_SPEED_RETURN(10G, 10000);
8382 TEST_SPEED_RETURN(1G, 1000);
8383 TEST_SPEED_RETURN(100M, 100);
8385 #undef TEST_SPEED_RETURN
8391 * Return the port capabilities bit for the highest speed in the capabilities.
8393 uint32_t fwcap_top_speed(uint32_t caps)
8395 #define TEST_SPEED_RETURN(__caps_speed) \
8397 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8398 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8401 TEST_SPEED_RETURN(400G);
8402 TEST_SPEED_RETURN(200G);
8403 TEST_SPEED_RETURN(100G);
8404 TEST_SPEED_RETURN(50G);
8405 TEST_SPEED_RETURN(40G);
8406 TEST_SPEED_RETURN(25G);
8407 TEST_SPEED_RETURN(10G);
8408 TEST_SPEED_RETURN(1G);
8409 TEST_SPEED_RETURN(100M);
8411 #undef TEST_SPEED_RETURN
8418 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8419 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8421 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8422 * 32-bit Port Capabilities value.
8424 static uint32_t lstatus_to_fwcap(u32 lstatus)
8426 uint32_t linkattr = 0;
8429 * Unfortunately the format of the Link Status in the old
8430 * 16-bit Port Information message isn't the same as the
8431 * 16-bit Port Capabilities bitfield used everywhere else ...
8433 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
8434 linkattr |= FW_PORT_CAP32_FC_RX;
8435 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
8436 linkattr |= FW_PORT_CAP32_FC_TX;
8437 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
8438 linkattr |= FW_PORT_CAP32_SPEED_100M;
8439 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
8440 linkattr |= FW_PORT_CAP32_SPEED_1G;
8441 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
8442 linkattr |= FW_PORT_CAP32_SPEED_10G;
8443 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
8444 linkattr |= FW_PORT_CAP32_SPEED_25G;
8445 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
8446 linkattr |= FW_PORT_CAP32_SPEED_40G;
8447 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
8448 linkattr |= FW_PORT_CAP32_SPEED_100G;
8454 * Updates all fields owned by the common code in port_info and link_config
8455 * based on information provided by the firmware. Does not touch any
8456 * requested_* field.
8458 static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
8459 enum fw_port_action action, bool *mod_changed, bool *link_changed)
8461 struct link_config old_lc, *lc = &pi->link_cfg;
8462 unsigned char fc, fec;
8464 int old_ptype, old_mtype;
/* Snapshot the current module identity so changes can be detected below. */
8466 old_ptype = pi->port_type;
8467 old_mtype = pi->mod_type;
/* Old (16-bit) GET_PORT_INFO reply: capabilities must be widened to 32 bits. */
8469 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8470 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
8472 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
8473 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
/* -1 means no MDIO address is available for this port. */
8474 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
8475 G_FW_PORT_CMD_MDIOADDR(stat) : -1;
8477 lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
8478 lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
8479 lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
8480 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
8481 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
/* Synthesize 32-bit link attributes from the old-style status word. */
8483 linkattr = lstatus_to_fwcap(stat);
/* New (32-bit) GET_PORT_INFO32 reply: fields are already 32-bit caps. */
8484 } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
8485 stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
8487 pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
8488 pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
8489 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
8490 G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
8492 lc->supported = be32_to_cpu(p->u.info32.pcaps32);
8493 lc->advertising = be32_to_cpu(p->u.info32.acaps32);
/*
 * NOTE(review): lpacaps32 looks like a 32-bit field but is read with
 * be16_to_cpu() here -- verify against the full file whether this should
 * be be32_to_cpu().
 */
8494 lc->lp_advertising = be16_to_cpu(p->u.info32.lpacaps32);
8495 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
8496 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
8498 linkattr = be32_to_cpu(p->u.info32.linkattr32);
/* Any other action is a programming error. */
8500 CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
/* Decode negotiated speed, pause (fc) and FEC from the link attributes. */
8504 lc->speed = fwcap_to_speed(linkattr);
8507 if (linkattr & FW_PORT_CAP32_FC_RX)
8509 if (linkattr & FW_PORT_CAP32_FC_TX)
8514 if (linkattr & FW_PORT_CAP32_FEC_RS)
8516 if (linkattr & FW_PORT_CAP32_FEC_BASER_RS)
8517 fec |= FEC_BASER_RS;
/* Report module and link changes to the caller, if requested. */
8520 if (mod_changed != NULL)
8521 *mod_changed = false;
8522 if (link_changed != NULL)
8523 *link_changed = false;
8524 if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
8525 old_lc.supported != lc->supported) {
8526 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
/* Seed the FEC hint from what the new transceiver advertises. */
8527 lc->fec_hint = lc->advertising &
8528 V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
8530 if (mod_changed != NULL)
8531 *mod_changed = true;
8533 if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
8534 old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
8535 if (link_changed != NULL)
8536 *link_changed = true;
8541 * t4_update_port_info - retrieve and update port information if changed
8542 * @pi: the port_info
8544 * We issue a Get Port Information Command to the Firmware and, if
8545 * successful, we check to see if anything is different from what we
8546 * last recorded and update things accordingly.
8548 int t4_update_port_info(struct port_info *pi)
8550 struct adapter *sc = pi->adapter;
8551 struct fw_port_cmd cmd;
8552 enum fw_port_action action;
/* Build a read-only PORT command addressed to this port's tx channel. */
8555 memset(&cmd, 0, sizeof(cmd));
8556 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8557 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8558 V_FW_PORT_CMD_PORTID(pi->tx_chan));
/* Prefer the 32-bit port-info action when the firmware supports it. */
8559 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
8560 FW_PORT_ACTION_GET_PORT_INFO;
8561 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
/* Reuse &cmd as the reply buffer; the _ns mailbox variant does not sleep. */
8563 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
/* NULL, NULL: this caller doesn't need module/link change notification. */
8567 handle_port_info(pi, &cmd, action, NULL, NULL);
8572 * t4_handle_fw_rpl - process a FW reply message
8573 * @adap: the adapter
8574 * @rpl: start of the FW message
8576 * Processes a FW message, such as link state change messages.
8578 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The first byte of any FW message is its opcode. */
8580 u8 opcode = *(const u8 *)rpl;
8581 const struct fw_port_cmd *p = (const void *)rpl;
8582 enum fw_port_action action =
8583 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8584 bool mod_changed, link_changed;
8586 if (opcode == FW_PORT_CMD &&
8587 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8588 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8589 /* link/module state change message */
8591 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8592 struct port_info *pi = NULL;
8593 struct link_config *lc;
/* Map the firmware channel number back to the matching port_info. */
8595 for_each_port(adap, i) {
8596 pi = adap2pinfo(adap, i);
8597 if (pi->tx_chan == chan)
/* Refresh cached port state and dispatch OS callbacks on changes. */
8603 handle_port_info(pi, p, action, &mod_changed, &link_changed);
8606 t4_os_portmod_changed(pi);
8609 t4_os_link_changed(pi);
/* Anything else is unexpected; warn (rate-limited) rather than crash. */
8613 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8620 * get_pci_mode - determine a card's PCI mode
8621 * @adapter: the adapter
8622 * @p: where to store the PCI settings
8624 * Determines a card's PCI mode and associated parameters, such as speed
8627 static void get_pci_mode(struct adapter *adapter,
8628 struct pci_params *p)
8633 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Decode link speed and width from the PCIe Link Status register. */
8635 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8636 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* The Negotiated Link Width field starts at bit 4 of Link Status. */
8637 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
/* Field of struct flash_desc (definition truncated in this listing):
 * the flash's Read-ID signature; presumably paired with a size_mb field
 * used below -- confirm against the full file. */
8642 u32 vendor_and_model_id;
/*
 * t4_get_flash_params - identify the adapter's flash part and record its
 * geometry in adapter->params.sf_size / sf_nsec.
 */
8646 int t4_get_flash_params(struct adapter *adapter)
8649 * Table for non-standard supported Flash parts. Note, all Flash
8650 * parts must have 64KB sectors.
8652 static struct flash_desc supported_flash[] = {
8653 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8658 unsigned int part, manufacturer;
8659 unsigned int density, size = 0;
8663 * Issue a Read ID Command to the Flash part. We decode supported
8664 * Flash parts and their sizes from this. There's a newer Query
8665 * Command which can retrieve detailed geometry information but many
8666 * Flash parts don't support it.
8668 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
8670 ret = sf1_read(adapter, 3, 0, 1, &flashid);
8671 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
8676 * Check to see if it's one of our non-standard supported Flash parts.
8678 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8679 if (supported_flash[part].vendor_and_model_id == flashid) {
8680 adapter->params.sf_size =
8681 supported_flash[part].size_mb;
8682 adapter->params.sf_nsec =
8683 adapter->params.sf_size / SF_SEC_SIZE;
8688 * Decode Flash part size. The code below looks repetitive with
8689 * common encodings, but that's not guaranteed in the JEDEC
8690 * specification for the Read JEDEC ID command. The only thing that
8691 * we're guaranteed by the JEDEC specification is where the
8692 * Manufacturer ID is in the returned result. After that each
8693 * Manufacturer ~could~ encode things completely differently.
8694 * Note, all Flash parts must have 64KB sectors.
8696 manufacturer = flashid & 0xff;
8697 switch (manufacturer) {
8698 case 0x20: /* Micron/Numonix */
8700 * This Density -> Size decoding table is taken from Micron
8703 density = (flashid >> 16) & 0xff;
8705 case 0x14: size = 1 << 20; break; /* 1MB */
8706 case 0x15: size = 1 << 21; break; /* 2MB */
8707 case 0x16: size = 1 << 22; break; /* 4MB */
8708 case 0x17: size = 1 << 23; break; /* 8MB */
8709 case 0x18: size = 1 << 24; break; /* 16MB */
8710 case 0x19: size = 1 << 25; break; /* 32MB */
8711 case 0x20: size = 1 << 26; break; /* 64MB */
8712 case 0x21: size = 1 << 27; break; /* 128MB */
8713 case 0x22: size = 1 << 28; break; /* 256MB */
8717 case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
8719 * This Density -> Size decoding table is taken from ISSI
8722 density = (flashid >> 16) & 0xff;
8724 case 0x16: size = 1 << 25; break; /* 32MB */
8725 case 0x17: size = 1 << 26; break; /* 64MB */
8729 case 0xc2: /* Macronix */
8731 * This Density -> Size decoding table is taken from Macronix
8734 density = (flashid >> 16) & 0xff;
8736 case 0x17: size = 1 << 23; break; /* 8MB */
8737 case 0x18: size = 1 << 24; break; /* 16MB */
8741 case 0xef: /* Winbond */
8743 * This Density -> Size decoding table is taken from Winbond
8746 density = (flashid >> 16) & 0xff;
8748 case 0x17: size = 1 << 23; break; /* 8MB */
8749 case 0x18: size = 1 << 24; break; /* 16MB */
8754 /* If we didn't recognize the FLASH part, that's no real issue: the
8755 * Hardware/Software contract says that Hardware will _*ALWAYS*_
8756 * use a FLASH part which is at least 4MB in size and has 64KB
8757 * sectors. The unrecognized FLASH part is likely to be much larger
8758 * than 4MB, but that's all we really need.
8761 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
8766 * Store decoded Flash size and fall through into vetting code.
8768 adapter->params.sf_size = size;
8769 adapter->params.sf_nsec = size / SF_SEC_SIZE;
8773 * We should ~probably~ reject adapters with FLASHes which are too
8774 * small but we have some legacy FPGAs with small FLASHes that we'd
8775 * still like to use. So instead we emit a scary message ...
8777 if (adapter->params.sf_size < FLASH_MIN_SIZE)
8778 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8779 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
/*
 * Set the PCIe completion timeout encoding in the Device Control 2
 * register.  NOTE(review): the second parameter line of the signature is
 * not visible in this listing; the caller passes the raw 4-bit timeout
 * encoding (0xd for ~4 seconds).
 */
8784 static void set_pcie_completion_timeout(struct adapter *adapter,
8790 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Read-modify-write DEVCTL2; the modify lines are elided in this listing. */
8792 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8795 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
/*
 * Return the invariant per-chip parameter table entry for @chipid
 * (CHELSIO_T4/T5/T6), or NULL for an unknown chip id.
 */
8799 const struct chip_params *t4_get_chip_params(int chipid)
8801 static const struct chip_params chip_params[] = {
/* T4 */
8805 .pm_stats_cnt = PM_NSTATS,
8806 .cng_ch_bits_log = 2,
8808 .cim_num_obq = CIM_NUM_OBQ,
8809 .mps_rplc_size = 128,
8811 .sge_fl_db = F_DBPRIO,
8812 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
/* T5 */
8817 .pm_stats_cnt = PM_NSTATS,
8818 .cng_ch_bits_log = 2,
8820 .cim_num_obq = CIM_NUM_OBQ_T5,
8821 .mps_rplc_size = 128,
8823 .sge_fl_db = F_DBPRIO | F_DBTYPE,
8824 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* T6 */
8829 .pm_stats_cnt = T6_PM_NSTATS,
8830 .cng_ch_bits_log = 3,
8832 .cim_num_obq = CIM_NUM_OBQ_T5,
8833 .mps_rplc_size = 256,
8836 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* Normalize chipid to a table index and bounds-check it. */
8840 chipid -= CHELSIO_T4;
8841 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
8844 return &chip_params[chipid];
8848 * t4_prep_adapter - prepare SW and HW for operation
8849 * @adapter: the adapter
8850 * @buf: temporary space of at least VPD_LEN size provided by the caller.
8852 * Initialize adapter SW state for the various HW modules, set initial
8853 * values for some adapter tunables, take PHYs out of reset, and
8854 * initialize the MDIO interface.
8856 int t4_prep_adapter(struct adapter *adapter, u32 *buf)
8862 get_pci_mode(adapter, &adapter->params.pci);
/* Identify the chip: PL_REV encodes both chip id and revision. */
8864 pl_rev = t4_read_reg(adapter, A_PL_REV);
8865 adapter->params.chipid = G_CHIPID(pl_rev);
8866 adapter->params.rev = G_REV(pl_rev);
8867 if (adapter->params.chipid == 0) {
8868 /* T4 did not have chipid in PL_REV (T5 onwards do) */
8869 adapter->params.chipid = CHELSIO_T4;
8871 /* T4A1 chip is not supported */
8872 if (adapter->params.rev == 1) {
8873 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
/* Look up the invariant per-chip parameter table. */
8878 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
8879 if (adapter->chip_params == NULL)
8882 adapter->params.pci.vpd_cap_addr =
8883 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
8885 ret = t4_get_flash_params(adapter);
8889 /* Cards with real ASICs have the chipid in the PCIe device id */
8890 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
8891 if (device_id >> 12 == chip_id(adapter))
8892 adapter->params.cim_la_size = CIMLA_SIZE;
/* Otherwise it's an FPGA, which uses a double-sized CIM LA. */
8895 adapter->params.fpga = 1;
8896 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
8899 ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
8903 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8906 * Default port and clock for debugging in case we can't reach FW.
8908 adapter->params.nports = 1;
8909 adapter->params.portvec = 1;
8910 adapter->params.vpd.cclk = 50000;
8912 /* Set pci completion timeout value to 4 seconds. */
8913 set_pcie_completion_timeout(adapter, 0xd);
8918 * t4_shutdown_adapter - shut down adapter, host & wire
8919 * @adapter: the adapter
8921 * Perform an emergency shutdown of the adapter and stop it from
8922 * continuing any further communication on the ports or DMA to the
8923 * host. This is typically used when the adapter and/or firmware
8924 * have crashed and we want to prevent any further accidental
8925 * communication with the rest of the world. This will also force
8926 * the port Link Status to go down -- if register writes work --
8927 * which should help our peers figure out that we're down.
8929 int t4_shutdown_adapter(struct adapter *adapter)
8933 t4_intr_disable(adapter);
8934 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
8935 for_each_port(adapter, port) {
8936 u32 a_port_cfg = is_t4(adapter) ?
8937 PORT_REG(port, A_XGMAC_PORT_CFG) :
8938 T5_PORT_REG(port, A_MAC_PORT_CFG);
8940 t4_write_reg(adapter, a_port_cfg,
8941 t4_read_reg(adapter, a_port_cfg)
8942 & ~V_SIGNAL_DET(1));
8944 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
8950 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8951 * @adapter: the adapter
8952 * @qid: the Queue ID
8953 * @qtype: the Ingress or Egress type for @qid
8954 * @user: true if this request is for a user mode queue
8955 * @pbar2_qoffset: BAR2 Queue Offset
8956 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8958 * Returns the BAR2 SGE Queue Registers information associated with the
8959 * indicated Absolute Queue ID. These are passed back in return value
8960 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8961 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8963 * This may return an error which indicates that BAR2 SGE Queue
8964 * registers aren't available. If an error is not returned, then the
8965 * following values are returned:
8967 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8968 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8970 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8971 * require the "Inferred Queue ID" ability may be used. E.g. the
8972 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8973 * then these "Inferred Queue ID" register may not be used.
/* NOTE(review): the @qid, @user, and @pbar2_qoffset parameter lines of
 * the signature are not visible in this listing. */
8975 int t4_bar2_sge_qregs(struct adapter *adapter,
8977 enum t4_bar2_qtype qtype,
8980 unsigned int *pbar2_qid)
8982 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8983 u64 bar2_page_offset, bar2_qoffset;
8984 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8986 /* T4 doesn't support BAR2 SGE Queue registers for kernel
8989 if (!user && is_t4(adapter))
8992 /* Get our SGE Page Size parameters.
8994 page_shift = adapter->params.sge.page_shift;
8995 page_size = 1 << page_shift;
8997 /* Get the right Queues per Page parameters for our Queue.
8999 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9000 ? adapter->params.sge.eq_s_qpp
9001 : adapter->params.sge.iq_s_qpp);
9002 qpp_mask = (1 << qpp_shift) - 1;
9004 /* Calculate the basics of the BAR2 SGE Queue register area:
9005 * o The BAR2 page the Queue registers will be in.
9006 * o The BAR2 Queue ID.
9007 * o The BAR2 Queue ID Offset into the BAR2 page.
9009 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9010 bar2_qid = qid & qpp_mask;
9011 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9013 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
9014 * hardware will infer the Absolute Queue ID simply from the writes to
9015 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9016 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
9017 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9018 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9019 * from the BAR2 Page and BAR2 Queue ID.
9021 * One important consequence of this is that some BAR2 SGE registers
9022 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9023 * there. But other registers synthesize the SGE Queue ID purely
9024 * from the writes to the registers -- the Write Combined Doorbell
9025 * Buffer is a good example. These BAR2 SGE Registers are only
9026 * available for those BAR2 SGE Register areas where the SGE Absolute
9027 * Queue ID can be inferred from simple writes.
9029 bar2_qoffset = bar2_page_offset;
9030 bar2_qinferred = (bar2_qid_offset < page_size);
9031 if (bar2_qinferred) {
9032 bar2_qoffset += bar2_qid_offset;
/* NOTE(review): the line that zeroes bar2_qid for the inferred case
 * appears to be elided in this listing -- verify against the full file. */
9036 *pbar2_qoffset = bar2_qoffset;
9037 *pbar2_qid = bar2_qid;
9042 * t4_init_devlog_params - initialize adapter->params.devlog
9043 * @adap: the adapter
9044 * @fw_attach: whether we can talk to the firmware
9046 * Initialize various fields of the adapter's Firmware Device Log
9047 * Parameters structure.
9049 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
9051 struct devlog_params *dparams = &adap->params.devlog;
9053 unsigned int devlog_meminfo;
9054 struct fw_devlog_cmd devlog_cmd;
9057 /* If we're dealing with newer firmware, the Device Log Parameters
9058 * are stored in a designated register which allows us to access the
9059 * Device Log even if we can't talk to the firmware.
9062 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
9064 unsigned int nentries, nentries128;
9066 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
9067 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
/* The register stores the entry count in units of 128 entries. */
9069 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
9070 nentries = (nentries128 + 1) * 128;
9071 dparams->size = nentries * sizeof(struct fw_devlog_e);
9077 * For any failing returns ...
9079 memset(dparams, 0, sizeof *dparams);
9082 * If we can't talk to the firmware, there's really nothing we can do
9088 /* Otherwise, ask the firmware for its Device Log Parameters.
9090 memset(&devlog_cmd, 0, sizeof devlog_cmd);
9091 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9092 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9093 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9094 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
/* Decode memory type, 16-byte-unit start address, and size from the reply. */
9100 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9101 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
9102 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
9103 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9109 * t4_init_sge_params - initialize adap->params.sge
9110 * @adapter: the adapter
9112 * Initialize various fields of the adapter's SGE Parameters structure.
9114 int t4_init_sge_params(struct adapter *adapter)
9117 struct sge_params *sp = &adapter->params.sge;
9118 unsigned i, tscale = 1;
/* Interrupt holdoff packet-count thresholds. */
9120 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
9121 sp->counter_val[0] = G_THRESHOLD_0(r);
9122 sp->counter_val[1] = G_THRESHOLD_1(r);
9123 sp->counter_val[2] = G_THRESHOLD_2(r);
9124 sp->counter_val[3] = G_THRESHOLD_3(r);
/* T6+ scales the SGE timers by TSCALE from the ITP control register. */
9126 if (chip_id(adapter) >= CHELSIO_T6) {
9127 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
9128 tscale = G_TSCALE(r);
/* Interrupt holdoff timer values, converted from core ticks to usecs. */
9135 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
9136 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
9137 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
9138 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
9139 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
9140 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
9141 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
9142 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
9143 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
/* Free-list starvation thresholds; the packing variant differs per chip. */
9145 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
9146 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
9148 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
9149 else if (is_t5(adapter))
9150 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
9152 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
9154 /* egress queues: log2 of # of doorbells per BAR2 page */
9155 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
9156 r >>= S_QUEUESPERPAGEPF0 +
9157 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
9158 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
9160 /* ingress queues: log2 of # of doorbells per BAR2 page */
9161 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
9162 r >>= S_QUEUESPERPAGEPF0 +
9163 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
9164 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
/* Host page size for this PF; the field encodes log2(page size) - 10. */
9166 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
9167 r >>= S_HOSTPAGESIZEPF0 +
9168 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
9169 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
/* Cache SGE_CONTROL-derived values: status page size, pkt shift, padding. */
9171 r = t4_read_reg(adapter, A_SGE_CONTROL);
9172 sp->sge_control = r;
9173 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
9174 sp->fl_pktshift = G_PKTSHIFT(r);
9175 if (chip_id(adapter) <= CHELSIO_T5) {
9176 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
9177 X_INGPADBOUNDARY_SHIFT);
9179 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
9180 X_T6_INGPADBOUNDARY_SHIFT);
/* Packing boundary: falls back to the pad boundary, else SGE_CONTROL2. */
9183 sp->pack_boundary = sp->pad_boundary;
9185 r = t4_read_reg(adapter, A_SGE_CONTROL2);
9186 if (G_INGPACKBOUNDARY(r) == 0)
9187 sp->pack_boundary = 16;
9189 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
/* Snapshot the free-list buffer size registers. */
9191 for (i = 0; i < SGE_FLBUF_SIZES; i++)
9192 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
9193 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
9199 * Read and cache the adapter's compressed filter mode and ingress config.
/* NOTE(review): the second parameter line (a sleep_ok flag, judging by the
 * t4_init_tp_params caller) is not visible in this listing. */
9201 static void read_filter_mode_and_ingress_config(struct adapter *adap,
9205 struct tp_params *tpp = &adap->params.tp;
/* Cache TP_VLAN_PRI_MAP (filter mode) and TP_INGRESS_CONFIG via TP PIO. */
9207 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
9209 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
9213 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9214 * shift positions of several elements of the Compressed Filter Tuple
9215 * for this adapter which we need frequently ...
9217 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
9218 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
9219 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
9220 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
9221 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
9222 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
9223 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
9224 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
9225 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
9226 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
/* T5+ also expose the 64-bit hash-filter mask via the LE hash registers. */
9228 if (chip_id(adap) > CHELSIO_T4) {
9229 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
9230 adap->params.tp.hash_filter_mask = v;
9231 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
9232 adap->params.tp.hash_filter_mask |= (u64)v << 32;
9237 * t4_init_tp_params - initialize adap->params.tp
9238 * @adap: the adapter
9240 * Initialize various fields of the adapter's TP Parameters structure.
9242 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9246 struct tp_params *tpp = &adap->params.tp;
/* Timer and delayed-ACK resolutions from TP_TIMER_RESOLUTION. */
9248 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
9249 tpp->tre = G_TIMERRESOLUTION(v);
9250 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
9252 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9253 for (chan = 0; chan < MAX_NCHAN; chan++)
9254 tpp->tx_modq[chan] = chan;
9256 read_filter_mode_and_ingress_config(adap, sleep_ok);
9259 * Cache a mask of the bits that represent the error vector portion of
9260 * rx_pkt.err_vec. T6+ can use a compressed error vector to make room
9261 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
9263 tpp->err_vec_mask = htobe16(0xffff);
9264 if (chip_id(adap) > CHELSIO_T5) {
9265 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
9266 if (v & F_CRXPKTENC) {
/* Compressed error vector: only the T6 COMPR_RXERR_VEC bits apply. */
9268 htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
9276 * t4_filter_field_shift - calculate filter field shift
9277 * @adap: the adapter
9278 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9280 * Return the shift position of a filter field within the Compressed
9281 * Filter Tuple. The filter field is specified via its selection bit
9282 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
9284 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9286 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field that isn't part of the current filter mode has no shift. */
9290 if ((filter_mode & filter_sel) == 0)
/*
 * Walk all selector bits below @filter_sel, accumulating the width of
 * every enabled field that precedes it in the Compressed Filter Tuple.
 * NOTE(review): most of the switch's `case F_xxx:` labels are not
 * visible in this listing; each W_FT_* width corresponds to one
 * selector bit.
 */
9293 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9294 switch (filter_mode & sel) {
9296 field_shift += W_FT_FCOE;
9299 field_shift += W_FT_PORT;
9302 field_shift += W_FT_VNIC_ID;
9305 field_shift += W_FT_VLAN;
9308 field_shift += W_FT_TOS;
9311 field_shift += W_FT_PROTOCOL;
9314 field_shift += W_FT_ETHERTYPE;
9317 field_shift += W_FT_MACMATCH;
9320 field_shift += W_FT_MPSHITTYPE;
9322 case F_FRAGMENTATION:
9323 field_shift += W_FT_FRAGMENTATION;
/*
 * t4_port_init - initialize a port's SW state, VI, and MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for FW commands
 * @pf: the PF that will own the VI
 * @vf: the VF that will own the VI
 * @port_id: the driver's index for this port
 *
 * Fix: the t4_query_params() call contained a mojibake "¶m," (an
 * HTML-entity-corrupted "&param,"); restored the address-of argument.
 */
9330 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
9335 struct port_info *p = adap2pinfo(adap, port_id);
/* Map our port index to the j'th bit set in the FW port vector. */
9338 for (i = 0, j = -1; i <= p->port_id; i++) {
9341 } while ((adap->params.portvec & (1 << j)) == 0);
9345 p->mps_bg_map = t4_get_mps_bg_map(adap, j);
9346 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
/* VFs without port capability must not issue the PORT command. */
9349 if (!(adap->flags & IS_VF) ||
9350 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
9351 t4_update_port_info(p);
/* Allocate a virtual interface on the port; a non-negative return is the VIID. */
9354 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
9358 p->vi[0].viid = ret;
/* SMT index encoding differs between T4/T5 and T6+. */
9359 if (chip_id(adap) <= CHELSIO_T5)
9360 p->vi[0].smt_idx = (ret & 0x7f) << 1;
9362 p->vi[0].smt_idx = (ret & 0x7f);
9363 p->vi[0].rss_size = rss_size;
9364 t4_os_set_hw_addr(p, addr);
/* Query the RSS table base for this VI; 0xffff if unavailable. */
9366 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
9367 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
9368 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
9369 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
9371 p->vi[0].rss_base = 0xffff;
9373 /* MPASS((val >> 16) == rss_size); */
9374 p->vi[0].rss_base = val & 0xffff;
9381 * t4_read_cimq_cfg - read CIM queue configuration
9382 * @adap: the adapter
9383 * @base: holds the queue base addresses in bytes
9384 * @size: holds the queue sizes in bytes
9385 * @thres: holds the queue full thresholds in bytes
9387 * Returns the current configuration of the CIM queues, starting with
9388 * the IBQs, then the OBQs.
9390 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9393 int cim_num_obq = adap->chip_params->cim_num_obq;
/* Inbound queues: select each IBQ in turn, then read its config. */
9395 for (i = 0; i < CIM_NUM_IBQ; i++) {
9396 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9398 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9399 /* value is in 256-byte units */
9400 *base++ = G_CIMQBASE(v) * 256;
9401 *size++ = G_CIMQSIZE(v) * 256;
9402 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
/* Outbound queues: count varies per chip; no full threshold is read. */
9404 for (i = 0; i < cim_num_obq; i++) {
9405 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9407 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9408 /* value is in 256-byte units */
9409 *base++ = G_CIMQBASE(v) * 256;
9410 *size++ = G_CIMQSIZE(v) * 256;
9415 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9416 * @adap: the adapter
9417 * @qid: the queue index
9418 * @data: where to store the queue contents
9419 * @n: capacity of @data in 32-bit words
9421 * Reads the contents of the selected CIM queue starting at address 0 up
9422 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9423 * error and the number of 32-bit words actually read on success.
9425 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9427 int i, err, attempts;
9429 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only IBQs 0-5 exist, and n must be a multiple of 4 words. */
9431 if (qid > 5 || (n & 3))
9434 addr = qid * nwords;
9438 /* It might take 3-10ms before the IBQ debug read access is allowed.
9439 * Wait for 1 Sec with a delay of 1 usec.
9443 for (i = 0; i < n; i++, addr++) {
9444 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
/* Poll for the debug-read busy bit to clear, then latch the word. */
9446 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9450 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable debug access when done. */
9452 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9457 * t4_read_cim_obq - read the contents of a CIM outbound queue
9458 * @adap: the adapter
9459 * @qid: the queue index
9460 * @data: where to store the queue contents
9461 * @n: capacity of @data in 32-bit words
9463 * Reads the contents of the selected CIM queue starting at address 0 up
9464 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9465 * error and the number of 32-bit words actually read on success.
9467 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9470 unsigned int addr, v, nwords;
9471 int cim_num_obq = adap->chip_params->cim_num_obq;
/* Valid OBQ indices depend on the chip; n must be a multiple of 4 words. */
9473 if ((qid > (cim_num_obq - 1)) || (n & 3))
/* Look up this OBQ's base and size from the CIM queue configuration. */
9476 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9477 V_QUENUMSELECT(qid));
9478 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9480 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
9481 nwords = G_CIMQSIZE(v) * 64; /* same */
9485 for (i = 0; i < n; i++, addr++) {
9486 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
/* Poll for the debug-read busy bit to clear, then latch the word. */
9488 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9492 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
/* Disable debug access when done. */
9494 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Base addresses of regions within the CIM internal address space; used
 * as offsets by t4_cim_ctl_read() and friends.  The enum's opening and
 * closing lines are elided in this listing. */
9500 CIM_CTL_BASE = 0x2000,
9501 CIM_PBT_ADDR_BASE = 0x2800,
9502 CIM_PBT_LRF_BASE = 0x3000,
9503 CIM_PBT_DATA_BASE = 0x3800
9507 * t4_cim_read - read a block from CIM internal address space
9508 * @adap: the adapter
9509 * @addr: the start address within the CIM address space
9510 * @n: number of words to read
9511 * @valp: where to store the result
9513 * Reads a block of 4-byte words from the CIM internal address space.
9515 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if a previous host access is still in flight. */
9520 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* One word per iteration: write the address, wait for !busy, read data. */
9523 for ( ; !ret && n--; addr += 4) {
9524 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9525 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9528 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9534 * t4_cim_write - write a block into CIM internal address space
9535 * @adap: the adapter
9536 * @addr: the start address within the CIM address space
9537 * @n: number of words to write
9538 * @valp: set of values to write
9540 * Writes a block of 4-byte words into the CIM internal address space.
9542 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9543 const unsigned int *valp)
/* Bail out if a previous host access is still in flight. */
9547 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* For each word: stage the data, post the address with the write bit
 * set, then wait for the access to complete.  Stops early on error.
 */
9550 for ( ; !ret && n--; addr += 4) {
9551 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9552 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9553 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single 4-byte word into the CIM address space. */
9559 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9562 return t4_cim_write(adap, addr, 1, &val);
9566 * t4_cim_ctl_read - read a block from CIM control region
9567 * @adap: the adapter
9568 * @addr: the start address within the CIM control region
9569 * @n: number of words to read
9570 * @valp: where to store the result
9572 * Reads a block of 4-byte words from the CIM control region.
9574 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* The control region is just an offset window within the CIM space. */
9577 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
9581 * t4_cim_read_la - read CIM LA capture buffer
9582 * @adap: the adapter
9583 * @la_buf: where to store the LA data
9584 * @wrptr: the HW write pointer within the capture buffer
9586 * Reads the contents of the CIM LA buffer with the most recent entry at
9587 * the end of the returned data and with the entry at @wrptr first.
9588 * We try to leave the LA in the running state we find it in.
9590 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9593 unsigned int cfg, val, idx;
9595 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
9599 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
9600 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
/* Re-read the config to pick up the current write pointer. */
9605 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9609 idx = G_UPDBGLAWRPTR(val);
/* Walk the whole LA, one entry per iteration, starting at the write
 * pointer so the oldest entry comes out first.
 */
9613 for (i = 0; i < adap->params.cim_la_size; i++) {
9614 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9615 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
9618 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9621 if (val & F_UPDBGLARDEN) {
9625 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
9629 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
9630 idx = (idx + 1) & M_UPDBGLARDPTR;
9632 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9633 * identify the 32-bit portion of the full 312-bit data
9636 while ((idx & 0xf) > 9)
/* NOTE(review): this uses '%' while the increment above uses '&' with
 * the same M_UPDBGLARDPTR mask (0xfff); modulo and mask differ at the
 * wrap point — confirm against the hardware spec that '%' is intended.
 */
9637 idx = (idx + 1) % M_UPDBGLARDPTR;
/* If the LA was running when we arrived, restart it. */
9640 if (cfg & F_UPDBGLAEN) {
9641 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9642 cfg & ~F_UPDBGLARDEN);
9650 * t4_tp_read_la - read TP LA capture buffer
9651 * @adap: the adapter
9652 * @la_buf: where to store the LA data
9653 * @wrptr: the HW write pointer within the capture buffer
9655 * Reads the contents of the TP LA buffer with the most recent entry at
9656 * the end of the returned data and with the entry at @wrptr first.
9657 * We leave the LA in the running state we find it in.
9659 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9661 bool last_incomplete;
9662 unsigned int i, cfg, val, idx;
9664 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
9665 if (cfg & F_DBGLAENABLE) /* freeze LA */
9666 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
9667 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
9669 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
9670 idx = G_DBGLAWPTR(val);
/* In modes >= 2 a cleared DBGLAWHLF bit means the entry at the write
 * pointer is only half captured; skip past it (and wipe it below).
 */
9671 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
9672 if (last_incomplete)
9673 idx = (idx + 1) & M_DBGLARPTR;
/* Clear the read-pointer field, keep the configured LA mask. */
9678 val &= ~V_DBGLARPTR(M_DBGLARPTR);
9679 val |= adap->params.tp.la_mask;
/* Read all TPLA_SIZE 64-bit entries, advancing the read pointer. */
9681 for (i = 0; i < TPLA_SIZE; i++) {
9682 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
9683 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
9684 idx = (idx + 1) & M_DBGLARPTR;
9687 /* Wipe out last entry if it isn't valid */
9688 if (last_incomplete)
9689 la_buf[TPLA_SIZE - 1] = ~0ULL;
9691 if (cfg & F_DBGLAENABLE) /* restore running state */
9692 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
9693 cfg | adap->params.tp.la_mask);
9697 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9698 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9699 * state for more than the Warning Threshold then we'll issue a warning about
9700 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
9701 * appears to be hung every Warning Repeat second till the situation clears.
9702 * If the situation clears, we'll note that as well.
9704 #define SGE_IDMA_WARN_THRESH 1 /* seconds */
9705 #define SGE_IDMA_WARN_REPEAT 300 /* seconds */
9708 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9709 * @adapter: the adapter
9710 * @idma: the adapter IDMA Monitor state
9712 * Initialize the state of an SGE Ingress DMA Monitor.
9714 void t4_idma_monitor_init(struct adapter *adapter,
9715 struct sge_idma_monitor_state *idma)
9717 /* Initialize the state variables for detecting an SGE Ingress DMA
9718 * hang. The SGE has internal counters which count up on each clock
9719 * tick whenever the SGE finds its Ingress DMA State Engines in the
9720 * same state they were on the previous clock tick. The clock used is
9721 * the Core Clock so we have a limit on the maximum "time" they can
9722 * record; typically a very small number of seconds. For instance,
9723 * with a 600MHz Core Clock, we can only count up to a bit more than
9724 * 7s. So we'll synthesize a larger counter in order to not run the
9725 * risk of having the "timers" overflow and give us the flexibility to
9726 * maintain a Hung SGE State Machine of our own which operates across
9727 * a longer time frame.
/* Threshold equal to one second's worth of core-clock ticks. */
9729 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9730 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
9734 * t4_idma_monitor - monitor SGE Ingress DMA state
9735 * @adapter: the adapter
9736 * @idma: the adapter IDMA Monitor state
9737 * @hz: number of ticks/second
9738 * @ticks: number of ticks since the last IDMA Monitor call
9740 void t4_idma_monitor(struct adapter *adapter,
9741 struct sge_idma_monitor_state *idma,
9744 int i, idma_same_state_cnt[2];
9746 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9747 * are counters inside the SGE which count up on each clock when the
9748 * SGE finds its Ingress DMA State Engines in the same states they
9749 * were in the previous clock. The counters will peg out at
9750 * 0xffffffff without wrapping around so once they pass the 1s
9751 * threshold they'll stay above that till the IDMA state changes.
9753 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
9754 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
9755 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* Check each of the two IDMA channels independently. */
9757 for (i = 0; i < 2; i++) {
9758 u32 debug0, debug11;
9760 /* If the Ingress DMA Same State Counter ("timer") is less
9761 * than 1s, then we can reset our synthesized Stall Timer and
9762 * continue. If we have previously emitted warnings about a
9763 * potential stalled Ingress Queue, issue a note indicating
9764 * that the Ingress Queue has resumed forward progress.
9766 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9767 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
9768 CH_WARN(adapter, "SGE idma%d, queue %u, "
9769 "resumed after %d seconds\n",
9770 i, idma->idma_qid[i],
9771 idma->idma_stalled[i]/hz);
9772 idma->idma_stalled[i] = 0;
9776 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9777 * domain. The first time we get here it'll be because we
9778 * passed the 1s Threshold; each additional time it'll be
9779 * because the RX Timer Callback is being fired on its regular
9782 * If the stall is below our Potential Hung Ingress Queue
9783 * Warning Threshold, continue.
9785 if (idma->idma_stalled[i] == 0) {
9786 idma->idma_stalled[i] = hz;
9787 idma->idma_warn[i] = 0;
9789 idma->idma_stalled[i] += ticks;
9790 idma->idma_warn[i] -= ticks;
9793 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
9796 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9798 if (idma->idma_warn[i] > 0)
9800 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
9802 /* Read and save the SGE IDMA State and Queue ID information.
9803 * We do this every time in case it changes across time ...
9804 * can't be too careful ...
9806 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
9807 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
9808 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9810 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
9811 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
9812 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9814 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
9815 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9816 i, idma->idma_qid[i], idma->idma_state[i],
9817 idma->idma_stalled[i]/hz,
/* Log a human-readable decode of the stuck IDMA state. */
9819 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9824 * t4_read_pace_tbl - read the pace table
9825 * @adap: the adapter
9826 * @pace_vals: holds the returned values
9828 * Returns the values of TP's pace table in microseconds.
9830 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9834 for (i = 0; i < NTX_SCHED; i++) {
/* Writing 0xffff0000 + i selects pace-table entry i for readback. */
9835 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
9836 v = t4_read_reg(adap, A_TP_PACE_TABLE);
/* Convert the raw DACK-tick value to microseconds for the caller. */
9837 pace_vals[i] = dack_ticks_to_usec(adap, v);
9842 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9843 * @adap: the adapter
9844 * @sched: the scheduler index
9845 * @kbps: the byte rate in Kbps
9846 * @ipg: the interpacket delay in tenths of nanoseconds
9848 * Return the current configuration of a HW Tx scheduler.
9850 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
9851 unsigned int *ipg, bool sleep_ok)
9853 unsigned int v, addr, bpt, cpt;
/* Each rate-limit register holds two schedulers, hence sched / 2. */
9856 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
9857 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
/* bpt = bytes per tick for this scheduler. */
9860 bpt = (v >> 8) & 0xff;
9863 *kbps = 0; /* scheduler disabled */
9865 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
/* bytes/s -> Kbits/s: multiply by 8, divide by 1000, i.e. divide by 125. */
9866 *kbps = (v * bpt) / 125;
9870 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
9871 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
/* Convert core-clock ticks to tenths of nanoseconds. */
9875 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9880 * t4_load_cfg - download config file
9881 * @adap: the adapter
9882 * @cfg_data: the cfg text file to write
9883 * @size: text file size
9885 * Write the supplied config text file to the card's serial flash.
9887 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9889 int ret, i, n, cfg_addr;
9891 unsigned int flash_cfg_start_sec;
9892 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9894 cfg_addr = t4_flash_cfg_addr(adap);
9899 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9901 if (size > FLASH_CFG_MAX_SIZE) {
9902 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
9903 FLASH_CFG_MAX_SIZE);
/* Erase every sector the config region spans before writing. */
9907 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9909 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9910 flash_cfg_start_sec + i - 1);
9912 * If size == 0 then we're simply erasing the FLASH sectors associated
9913 * with the on-adapter Firmware Configuration File.
9915 if (ret || size == 0)
9918 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9919 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9920 if ( (size - i) < SF_PAGE_SIZE)
9924 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
9928 addr += SF_PAGE_SIZE;
9929 cfg_data += SF_PAGE_SIZE;
9934 CH_ERR(adap, "config file %s failed %d\n",
9935 (size == 0 ? "clear" : "download"), ret);
9940 * t5_fw_init_extern_mem - initialize the external memory
9941 * @adap: the adapter
9943 * Initializes the external memory on T5.
9945 int t5_fw_init_extern_mem(struct adapter *adap)
9947 u32 params[1], val[1];
9953 val[0] = 0xff; /* Initialize all MCs */
/* Ask the firmware to run memory-controller initialization via the
 * DEV/MCINIT parameter; this can take a long time, so use the maximum
 * command timeout.
 */
9954 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
9955 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
9956 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
9957 FW_CMD_MAX_TIMEOUT);
9962 /* BIOS boot headers */
9963 typedef struct pci_expansion_rom_header {
9964 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
9965 u8 reserved[22]; /* Reserved per processor Architecture data */
9966 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
9967 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
9969 /* Legacy PCI Expansion ROM Header */
9970 typedef struct legacy_pci_expansion_rom_header {
9971 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
9972 u8 size512; /* Current Image Size in units of 512 bytes */
9973 u8 initentry_point[4];
9974 u8 cksum; /* Checksum computed on the entire Image */
9975 u8 reserved[16]; /* Reserved */
9976 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
9977 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
9979 /* EFI PCI Expansion ROM Header */
9980 typedef struct efi_pci_expansion_rom_header {
9981 u8 signature[2]; /* ROM signature. The value 0xaa55 */
9982 u8 initialization_size[2]; /* Units 512. Includes this header */
9983 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
9984 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
9985 u8 efi_machine_type[2]; /* Machine type from EFI image header */
9986 u8 compression_type[2]; /* Compression type. */
9988 * Compression type definition
9991 * 0x2-0xFFFF: Reserved
9993 u8 reserved[8]; /* Reserved */
9994 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
9995 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
9996 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
9998 /* PCI Data Structure Format */
9999 typedef struct pcir_data_structure { /* PCI Data Structure */
10000 u8 signature[4]; /* Signature. The string "PCIR" */
10001 u8 vendor_id[2]; /* Vendor Identification */
10002 u8 device_id[2]; /* Device Identification */
10003 u8 vital_product[2]; /* Pointer to Vital Product Data */
10004 u8 length[2]; /* PCIR Data Structure Length */
10005 u8 revision; /* PCIR Data Structure Revision */
10006 u8 class_code[3]; /* Class Code */
10007 u8 image_length[2]; /* Image Length. Multiple of 512B */
10008 u8 code_revision[2]; /* Revision Level of Code/Data */
10009 u8 code_type; /* Code Type. */
10011 * PCI Expansion ROM Code Types
10012 * 0x00: Intel IA-32, PC-AT compatible. Legacy
10013 * 0x01: Open Firmware standard for PCI. FCODE
10014 * 0x02: Hewlett-Packard PA RISC. HP reserved
10015 * 0x03: EFI Image. EFI
10016 * 0x04-0xFF: Reserved.
10018 u8 indicator; /* Indicator. Identifies the last image in the ROM */
10019 u8 reserved[2]; /* Reserved */
10020 } pcir_data_t; /* PCI__DATA_STRUCTURE */
10022 /* BOOT constants */
10024 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
10025 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
10026 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
10027 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
10028 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* maximum boot image: 1024 * 512B */
10029 VENDOR_ID = 0x1425, /* Vendor ID */
10030 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
10034 * modify_device_id - Modifies the device ID of the Boot BIOS image
10035 * @device_id: the device ID to write.
10036 * @boot_data: the boot image to modify.
10038 * Write the supplied device ID to the boot BIOS image.
10040 static void modify_device_id(int device_id, u8 *boot_data)
10042 legacy_pci_exp_rom_header_t *header;
10043 pcir_data_t *pcir_header;
10044 u32 cur_header = 0;
10047 * Loop through all chained images and change the device ID's
/* Locate the current image's header and its PCIR data structure. */
10050 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
10051 pcir_header = (pcir_data_t *) &boot_data[cur_header +
10052 le16_to_cpu(*(u16*)header->pcir_offset)];
10055 * Only modify the Device ID if code type is Legacy or HP.
10056 * 0x00: Okay to modify
10057 * 0x01: FCODE. Do not modify
10058 * 0x03: Okay to modify
10059 * 0x04-0xFF: Do not modify
10061 if (pcir_header->code_type == 0x00) {
10066 * Modify Device ID to match current adapter
10068 *(u16*) pcir_header->device_id = device_id;
10071 * Set checksum temporarily to 0.
10072 * We will recalculate it later.
10074 header->cksum = 0x0;
10077 * Calculate and update checksum
10079 for (i = 0; i < (header->size512 * 512); i++)
10080 csum += (u8)boot_data[cur_header + i];
10083 * Invert summed value to create the checksum
10084 * Writing new checksum value directly to the boot data
10086 boot_data[cur_header + 7] = -csum;
10088 } else if (pcir_header->code_type == 0x03) {
10091 * Modify Device ID to match current adapter
10093 *(u16*) pcir_header->device_id = device_id;
10099 * Check indicator element to identify if this is the last
10100 * image in the ROM.
10102 if (pcir_header->indicator & 0x80)
10106 * Move header pointer up to the next image in the ROM.
10108 cur_header += header->size512 * 512;
10113 * t4_load_boot - download boot flash
10114 * @adapter: the adapter
10115 * @boot_data: the boot image to write
10116 * @boot_addr: offset in flash to write boot_data
10117 * @size: image size
10119 * Write the supplied boot image to the card's serial flash.
10120 * The boot image has the following sections: a 28-byte header and the
10123 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10124 unsigned int boot_addr, unsigned int size)
10126 pci_exp_rom_header_t *header;
10128 pcir_data_t *pcir_header;
10130 uint16_t device_id;
10132 unsigned int boot_sector = (boot_addr * 1024 );
10133 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10136 * Make sure the boot image does not encroach on the firmware region
10138 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10139 CH_ERR(adap, "boot image encroaching on firmware region\n");
10144 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10145 * and Boot configuration data sections. These 3 boot sections span
10146 * sectors 0 to 7 in flash and live right before the FW image location.
10148 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
10150 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10151 (boot_sector >> 16) + i - 1);
10154 * If size == 0 then we're simply erasing the FLASH sectors associated
10155 * with the on-adapter option ROM file
10157 if (ret || (size == 0))
10160 /* Get boot header */
10161 header = (pci_exp_rom_header_t *)boot_data;
10162 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
10163 /* PCIR Data Structure */
10164 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
10167 * Perform some primitive sanity testing to avoid accidentally
10168 * writing garbage over the boot sectors. We ought to check for
10169 * more but it's not worth it for now ...
10171 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10172 CH_ERR(adap, "boot image too small/large\n");
10176 #ifndef CHELSIO_T4_DIAGS
10178 * Check BOOT ROM header signature
10180 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
10181 CH_ERR(adap, "Boot image missing signature\n");
10186 * Check PCI header signature
10188 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
10189 CH_ERR(adap, "PCI header missing signature\n");
10194 * Check Vendor ID matches Chelsio ID
10196 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
10197 CH_ERR(adap, "Vendor ID missing signature\n");
10203 * Retrieve adapter's device ID
10205 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10206 /* Want to deal with PF 0 so I strip off PF 4 indicator */
10207 device_id = device_id & 0xf0ff;
10210 * Check PCIE Device ID
10212 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
10214 * Change the device ID in the Boot BIOS image to match
10215 * the Device ID of the current adapter.
10217 modify_device_id(device_id, boot_data);
10221 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10222 * we finish copying the rest of the boot image. This will ensure
10223 * that the BIOS boot header will only be written if the boot image
10224 * was written in full.
10226 addr = boot_sector;
10227 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10228 addr += SF_PAGE_SIZE;
10229 boot_data += SF_PAGE_SIZE;
10230 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* Finally commit the header page; its presence marks a complete image. */
10235 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10236 (const u8 *)header, 0);
10240 CH_ERR(adap, "boot image download failed, error %d\n", ret);
10245 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10246 * @adapter: the adapter
10248 * Return the address within the flash where the OptionROM Configuration
10249 * is stored, or an error if the device FLASH is too small to contain
10250 * a OptionROM Configuration.
10252 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10255 * If the device FLASH isn't large enough to hold a Firmware
10256 * Configuration File, return an error.
10258 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10261 return FLASH_BOOTCFG_START;
/*
 * t4_load_bootcfg - download OptionROM configuration data to flash
 * @adap: the adapter
 * @cfg_data: the OptionROM configuration data to write
 * @size: size of @cfg_data in bytes (0 simply erases the region)
 *
 * Write the supplied OptionROM Configuration data to the card's serial
 * flash, erasing the spanned sectors first.
 */
10264 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10266 int ret, i, n, cfg_addr;
10268 unsigned int flash_cfg_start_sec;
10269 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10271 cfg_addr = t4_flash_bootcfg_addr(adap);
10276 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10278 if (size > FLASH_BOOTCFG_MAX_SIZE) {
10279 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10280 FLASH_BOOTCFG_MAX_SIZE);
10284 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10286 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10287 flash_cfg_start_sec + i - 1);
10290 * If size == 0 then we're simply erasing the FLASH sectors associated
10291 * with the on-adapter OptionROM Configuration File.
10293 if (ret || size == 0)
10296 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10297 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10298 if ( (size - i) < SF_PAGE_SIZE)
10302 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10306 addr += SF_PAGE_SIZE;
10307 cfg_data += SF_PAGE_SIZE;
10312 CH_ERR(adap, "boot config data %s failed %d\n",
10313 (size == 0 ? "clear" : "download"), ret);
10318 * t4_set_filter_mode - configure the optional components of filter tuples
10319 * @adap: the adapter
10320 * @mode_map: a bitmap selecting which optional filter components to enable
10321 * @sleep_ok: if true we may sleep while awaiting command completion
10323 * Sets the filter mode by selecting the optional components to enable
10324 * in filter tuples. Returns 0 on success and a negative error if the
10325 * requested mode needs more bits than are available for optional
10328 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
/* Bit widths of the optional tuple fields, indexed S_FCOE..S_FRAGMENTATION. */
10331 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Sum the widths of all requested fields to check they fit. */
10335 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10336 if (mode_map & (1 << i))
10338 if (nbits > FILTER_OPT_LEN)
10340 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
/* Refresh the cached filter mode / ingress config after the change. */
10341 read_filter_mode_and_ingress_config(adap, sleep_ok);
10347 * t4_clr_port_stats - clear port statistics
10348 * @adap: the adapter
10349 * @idx: the port index
10351 * Clear HW statistics for the given port.
10353 void t4_clr_port_stats(struct adapter *adap, int idx)
10356 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
10357 u32 port_base_addr;
/* The MPS port statistics base differs between T4 and later chips. */
10360 port_base_addr = PORT_BASE(idx);
10362 port_base_addr = T5_PORT_BASE(idx);
/* Zero the TX then RX per-port statistics register ranges. */
10364 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10365 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10366 t4_write_reg(adap, port_base_addr + i, 0);
10367 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10368 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10369 t4_write_reg(adap, port_base_addr + i, 0);
/* Also clear buffer-group drop/truncate counters owned by this port. */
10370 for (i = 0; i < 4; i++)
10371 if (bgmap & (1 << i)) {
10373 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10375 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10380 * t4_i2c_rd - read I2C data from adapter
10381 * @adap: the adapter
10382 * @port: Port number if per-port device; <0 if not
10383 * @devid: per-port device ID or absolute device ID
10384 * @offset: byte offset into device I2C space
10385 * @len: byte length of I2C space data
10386 * @buf: buffer in which to return I2C data
10388 * Reads the I2C data from the indicated device and location.
10390 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10391 int port, unsigned int devid,
10392 unsigned int offset, unsigned int len,
10395 u32 ldst_addrspace;
10396 struct fw_ldst_cmd ldst;
/* Reject transfers larger than the LDST command's inline data buffer. */
10402 len > sizeof ldst.u.i2c.data)
10405 memset(&ldst, 0, sizeof ldst);
10406 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10407 ldst.op_to_addrspace =
10408 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10412 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff means "not a per-port device". */
10413 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10414 ldst.u.i2c.did = devid;
10415 ldst.u.i2c.boffset = offset;
10416 ldst.u.i2c.blen = len;
10417 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10419 memcpy(buf, ldst.u.i2c.data, len);
10424 * t4_i2c_wr - write I2C data to adapter
10425 * @adap: the adapter
10426 * @port: Port number if per-port device; <0 if not
10427 * @devid: per-port device ID or absolute device ID
10428 * @offset: byte offset into device I2C space
10429 * @len: byte length of I2C space data
10430 * @buf: buffer containing new I2C data
10432 * Write the I2C data to the indicated device and location.
10434 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10435 int port, unsigned int devid,
10436 unsigned int offset, unsigned int len,
10439 u32 ldst_addrspace;
10440 struct fw_ldst_cmd ldst;
/* Reject transfers larger than the LDST command's inline data buffer. */
10445 len > sizeof ldst.u.i2c.data)
10448 memset(&ldst, 0, sizeof ldst);
10449 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10450 ldst.op_to_addrspace =
10451 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10455 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff means "not a per-port device". */
10456 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10457 ldst.u.i2c.did = devid;
10458 ldst.u.i2c.boffset = offset;
10459 ldst.u.i2c.blen = len;
10460 memcpy(ldst.u.i2c.data, buf, len);
10461 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10465 * t4_sge_ctxt_rd - read an SGE context through FW
10466 * @adap: the adapter
10467 * @mbox: mailbox to use for the FW command
10468 * @cid: the context id
10469 * @ctype: the context type
10470 * @data: where to store the context data
10472 * Issues a FW command through the given mailbox to read an SGE context.
10474 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10475 enum ctxt_type ctype, u32 *data)
10478 struct fw_ldst_cmd c;
/* Map the context type to the corresponding LDST address space. */
10480 if (ctype == CTXT_EGRESS)
10481 ret = FW_LDST_ADDRSPC_SGE_EGRC;
10482 else if (ctype == CTXT_INGRESS)
10483 ret = FW_LDST_ADDRSPC_SGE_INGC;
10484 else if (ctype == CTXT_FLM)
10485 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10487 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10489 memset(&c, 0, sizeof(c));
10490 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10491 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10492 V_FW_LDST_CMD_ADDRSPACE(ret));
10493 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10494 c.u.idctxt.physid = cpu_to_be32(cid);
10496 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Unpack the six 32-bit words of context data from the reply. */
10498 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10499 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10500 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10501 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10502 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10503 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10509 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10510 * @adap: the adapter
10511 * @cid: the context id
10512 * @ctype: the context type
10513 * @data: where to store the context data
10515 * Reads an SGE context directly, bypassing FW. This is only for
10516 * debugging when FW is unavailable.
10518 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* Kick off the context read and wait for the BUSY bit to clear. */
10523 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10524 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
/* Copy out all context data registers (DATA0..DATA5). */
10526 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
10527 *data++ = t4_read_reg(adap, i);
/*
 * t4_sched_config - issue an FW_SCHED_CMD CONFIG sub-command
 * @adapter: the adapter
 * @type: the scheduler type
 * @minmaxen: min/max rate enable setting
 *
 * Configures the firmware traffic scheduler via the mailbox.
 */
10531 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
10534 struct fw_sched_cmd cmd;
10536 memset(&cmd, 0, sizeof(cmd));
10537 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10540 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10542 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
10543 cmd.u.config.type = type;
10544 cmd.u.config.minmaxen = minmaxen;
10546 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params - issue an FW_SCHED_CMD PARAMS sub-command
 * @adapter: the adapter
 *
 * Programs one firmware traffic scheduler entity (channel/class) with
 * the supplied rate, weight, packet size and burst size parameters.
 * Multi-byte rate/weight/size fields are converted to big-endian for FW.
 */
10550 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
10551 int rateunit, int ratemode, int channel, int cl,
10552 int minrate, int maxrate, int weight, int pktsize,
10553 int burstsize, int sleep_ok)
10555 struct fw_sched_cmd cmd;
10557 memset(&cmd, 0, sizeof(cmd));
10558 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10561 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10563 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10564 cmd.u.params.type = type;
10565 cmd.u.params.level = level;
10566 cmd.u.params.mode = mode;
10567 cmd.u.params.ch = channel;
10568 cmd.u.params.cl = cl;
10569 cmd.u.params.unit = rateunit;
10570 cmd.u.params.rate = ratemode;
10571 cmd.u.params.min = cpu_to_be32(minrate);
10572 cmd.u.params.max = cpu_to_be32(maxrate);
10573 cmd.u.params.weight = cpu_to_be16(weight);
10574 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10575 cmd.u.params.burstsize = cpu_to_be16(burstsize);
10577 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_ch_rl - set a channel-level rate limit
 * @adapter: the adapter
 * @channel: the channel to rate-limit
 * @ratemode: relative (%) or absolute (kbps) rate mode
 * @maxrate: the maximum rate in the units selected by @ratemode
 * @sleep_ok: if true we may sleep while awaiting command completion
 */
10581 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
10582 unsigned int maxrate, int sleep_ok)
10584 struct fw_sched_cmd cmd;
10586 memset(&cmd, 0, sizeof(cmd));
10587 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10590 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10592 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10593 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10594 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
10595 cmd.u.params.ch = channel;
10596 cmd.u.params.rate = ratemode; /* REL or ABS */
10597 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
10599 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_cl_wrr - set a class weighted-round-robin weight
 * @adapter: the adapter
 * @channel: the channel the class belongs to
 * @cl: the scheduling class
 * @weight: WRR weight as a percentage (0..100)
 * @sleep_ok: if true we may sleep while awaiting command completion
 */
10603 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
10604 int weight, int sleep_ok)
10606 struct fw_sched_cmd cmd;
/* Weight is a percentage; anything outside 0..100 is invalid. */
10608 if (weight < 0 || weight > 100)
10611 memset(&cmd, 0, sizeof(cmd));
10612 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10615 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10617 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10618 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10619 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
10620 cmd.u.params.ch = channel;
10621 cmd.u.params.cl = cl;
10622 cmd.u.params.weight = cpu_to_be16(weight);
10624 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_cl_rl_kbps - set a class absolute bit-rate limit
 * @adapter: the adapter
 * @channel: the channel the class belongs to
 * @cl: the scheduling class
 * @mode: the scheduler mode
 * @maxrate: the maximum rate in kbps
 * @pktsize: the expected packet size
 * @sleep_ok: if true we may sleep while awaiting command completion
 */
10628 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
10629 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
10631 struct fw_sched_cmd cmd;
10633 memset(&cmd, 0, sizeof(cmd));
10634 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10637 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10639 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10640 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10641 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
10642 cmd.u.params.mode = mode;
10643 cmd.u.params.ch = channel;
10644 cmd.u.params.cl = cl;
10645 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
10646 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
10647 cmd.u.params.max = cpu_to_be32(maxrate);
10648 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10650 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10655 * t4_config_watchdog - configure (enable/disable) a watchdog timer
10656 * @adapter: the adapter
10657 * @mbox: mailbox to use for the FW command
10658 * @pf: the PF owning the queue
10659 * @vf: the VF owning the queue
10660 * @timeout: watchdog timeout in ms
10661 * @action: watchdog timer / action
10663 * There are separate watchdog timers for each possible watchdog
10664 * action. Configure one of the watchdog timers by setting a non-zero
10665 * timeout. Disable a watchdog timer by using a timeout of zero.
10667 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
10668 unsigned int pf, unsigned int vf,
10669 unsigned int timeout, unsigned int action)
10671 struct fw_watchdog_cmd wdog;
10672 unsigned int ticks;
10675 * The watchdog command expects a timeout in units of 10ms so we need
10676 * to convert it here (via rounding) and force a minimum of one 10ms
10677 * "tick" if the timeout is non-zero but the conversion results in 0
10680 ticks = (timeout + 5)/10;
10681 if (timeout && !ticks)
10684 memset(&wdog, 0, sizeof wdog);
10685 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
10688 V_FW_PARAMS_CMD_PFN(pf) |
10689 V_FW_PARAMS_CMD_VFN(vf));
10690 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
10691 wdog.timeout = cpu_to_be32(ticks);
10692 wdog.action = cpu_to_be32(action);
10694 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
10697 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
10699 struct fw_devlog_cmd devlog_cmd;
10702 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10703 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10704 F_FW_CMD_REQUEST | F_FW_CMD_READ);
10705 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10706 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10707 sizeof(devlog_cmd), &devlog_cmd);
10711 *level = devlog_cmd.level;
10715 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
10717 struct fw_devlog_cmd devlog_cmd;
10719 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10720 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10723 devlog_cmd.level = level;
10724 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10725 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10726 sizeof(devlog_cmd), &devlog_cmd);