2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
/* !! collapses the masked field to 0/1 so it can be compared to @polarity. */
66 if (!!(val & mask) == polarity) {
/* Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value (passes a NULL @valp).
 */
78 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
79 int polarity, int attempts, int delay)
81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
86 * t4_set_reg_field - set a register field to a value
87 * @adapter: the adapter to program
88 * @addr: the register address
89 * @mask: specifies the portion of the register to modify
90 * @val: the new value for the register field
92 * Sets a register field specified by the supplied mask to the
95 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the masked field, then OR in the new value. */
98 u32 v = t4_read_reg(adapter, addr) & ~mask;
100 t4_write_reg(adapter, addr, v | val);
101 (void) t4_read_reg(adapter, addr); /* flush */
105 * t4_read_indirect - read indirectly addressed registers
107 * @addr_reg: register holding the indirect address
108 * @data_reg: register holding the value of the indirect register
109 * @vals: where the read register values are stored
110 * @nregs: how many indirect registers to read
111 * @start_idx: index of first indirect register to read
113 * Reads registers that are accessed indirectly through an address/data
116 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117 unsigned int data_reg, u32 *vals,
118 unsigned int nregs, unsigned int start_idx)
/* Select the indirect register via the address register, then read its
 * value back through the data register.
 */
121 t4_write_reg(adap, addr_reg, start_idx);
122 *vals++ = t4_read_reg(adap, data_reg);
128 * t4_write_indirect - write indirectly addressed registers
130 * @addr_reg: register holding the indirect addresses
131 * @data_reg: register holding the value for the indirect registers
132 * @vals: values to write
133 * @nregs: how many indirect registers to write
134 * @start_idx: address of first indirect register to write
136 * Writes a sequential block of registers that are accessed indirectly
137 * through an address/data register pair.
139 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140 unsigned int data_reg, const u32 *vals,
141 unsigned int nregs, unsigned int start_idx)
/* Select each successive indirect register, then write its new value
 * through the data register.
 */
144 t4_write_reg(adap, addr_reg, start_idx++);
145 t4_write_reg(adap, data_reg, *vals++);
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism. This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 * the backdoor registers on a regular basis and we can end up
157 * conflicting with its uses!
159 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
/* Build the backdoor request: our PCI function number plus the target
 * config-space register offset.
 */
161 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
/* T4/T5 and T6 encode the request slightly differently. */
164 if (chip_id(adap) <= CHELSIO_T5)
172 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
173 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
176 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
177 * Configuration Space read. (None of the other fields matter when
178 * F_ENABLE is 0 so a simple register write is easier than a
179 * read-modify-write via t4_set_reg_field().)
181 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 * t4_report_fw_error - report firmware error
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
194 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by the PCIE_FW_EVAL field value. */
196 static const char *const reason[] = {
197 "Crash", /* PCIE_FW_EVAL_CRASH */
198 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
199 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
200 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
201 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
203 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
204 "Reserved", /* reserved */
208 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209 if (pcie_fw & F_PCIE_FW_ERR)
210 CH_ERR(adap, "Firmware reports adapter error: %s\n",
211 reason[G_PCIE_FW_EVAL(pcie_fw)]);
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
217 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 8-byte flits out of the mailbox, converting each to
 * big-endian as required by the firmware wire format.
 */
220 for ( ; nflit; nflit--, mbox_addr += 8)
221 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr))&#59;
225 * Handle a FW assertion reported in a mailbox.
227 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
/* Log the firmware assertion's source location and the two values it
 * reported; all multi-byte fields arrive big-endian from the firmware.
 */
230 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231 asrt->u.assert.filename_0_7,
232 be32_to_cpu(asrt->u.assert.line),
233 be32_to_cpu(asrt->u.assert.x),
234 be32_to_cpu(asrt->u.assert.y));
237 #define X_CIM_PF_NOACCESS 0xeeeeeeee
239 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
241 * @mbox: index of the mailbox to use
242 * @cmd: the command to write
243 * @size: command length in bytes
244 * @rpl: where to optionally store the reply
245 * @sleep_ok: if true we may sleep while awaiting command completion
246 * @timeout: time to wait for command to finish before timing out
247 * (negative implies @sleep_ok=false)
249 * Sends the given command to FW through the selected mailbox and waits
250 * for the FW to execute the command. If @rpl is not %NULL it is used to
251 * store the FW's reply to the command. The command and its optional
252 * reply are of the same length. Some FW commands like RESET and
253 * INITIALIZE can take a considerable amount of time to execute.
254 * @sleep_ok determines whether we may sleep while awaiting the response.
255 * If sleeping is allowed we use progressive backoff otherwise we spin.
256 * Note that passing in a negative @timeout is an alternate mechanism
257 * for specifying @sleep_ok=false. This is useful when a higher level
258 * interface allows for specification of @timeout but not @sleep_ok ...
260 * The return value is 0 on success or a negative errno on failure. A
261 * failure can happen either because we are not able to execute the
262 * command or FW executes it but signals an error. In the latter case
263 * the return value is the error code indicated by FW (negated).
265 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
266 int size, void *rpl, bool sleep_ok, int timeout)
269 * We delay in small increments at first in an effort to maintain
270 * responsiveness for simple, fast executing commands but then back
271 * off to larger delays to a maximum retry delay.
273 static const int delay[] = {
274 1, 1, 3, 5, 10, 10, 20, 50, 100
278 int i, ms, delay_idx, ret;
279 const __be64 *p = cmd;
280 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
281 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
283 __be64 cmd_rpl[MBOX_LEN/8];
/* Commands must be a multiple of 16 bytes and fit within the mailbox. */
286 if ((size & 15) || size > MBOX_LEN)
/* VFs use a different mailbox data base address depending on chip rev. */
289 if (adap->flags & IS_VF) {
291 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
293 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
294 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
298 * If we have a negative timeout, that implies that we can't sleep.
306 * Attempt to gain access to the mailbox.
/* Try a few times to claim ownership of the mailbox from the hardware. */
308 for (i = 0; i < 4; i++) {
309 ctl = t4_read_reg(adap, ctl_reg);
311 if (v != X_MBOWNER_NONE)
316 * If we were unable to gain access, dequeue ourselves from the
317 * mailbox atomic access list and report the error to our caller.
319 if (v != X_MBOWNER_PL) {
320 t4_report_fw_error(adap);
/* Firmware holding the mailbox -> busy; nobody claimed it -> timeout. */
321 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
326 * If we gain ownership of the mailbox and there's a "valid" message
327 * in it, this is likely an asynchronous error message from the
328 * firmware. So we'll report that and then proceed on with attempting
329 * to issue our own command ... which may well fail if the error
330 * presaged the firmware crashing ...
332 if (ctl & F_MBMSGVALID) {
333 CH_ERR(adap, "found VALID command in mbox %u: "
334 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
335 (unsigned long long)t4_read_reg64(adap, data_reg),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
338 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
339 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
340 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
341 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
342 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
346 * Copy in the new mailbox command and send it on its way ...
348 for (i = 0; i < size; i += 8, p++)
349 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
351 if (adap->flags & IS_VF) {
353 * For the VFs, the Mailbox Data "registers" are
354 * actually backed by T4's "MA" interface rather than
355 * PL Registers (as is the case for the PFs). Because
356 * these are in different coherency domains, the write
357 * to the VF's PL-register-backed Mailbox Control can
358 * race in front of the writes to the MA-backed VF
359 * Mailbox Data "registers". So we need to do a
360 * read-back on at least one byte of the VF Mailbox
361 * Data registers before doing the write to the VF
362 * Mailbox Control register.
364 t4_read_reg(adap, data_reg);
367 CH_DUMP_MBOX(adap, mbox, data_reg);
/* Hand the mailbox (and the command) over to the firmware. */
369 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
370 t4_read_reg(adap, ctl_reg); /* flush write */
376 * Loop waiting for the reply; bail out if we time out or the firmware
380 for (i = 0; i < timeout; i += ms) {
/* PFs also watch for a firmware error indication while polling. */
381 if (!(adap->flags & IS_VF)) {
382 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
383 if (pcie_fw & F_PCIE_FW_ERR)
387 ms = delay[delay_idx]; /* last element may repeat */
388 if (delay_idx < ARRAY_SIZE(delay) - 1)
395 v = t4_read_reg(adap, ctl_reg);
396 if (v == X_CIM_PF_NOACCESS)
/* Ownership back with us means the firmware has responded. */
398 if (G_MBOWNER(v) == X_MBOWNER_PL) {
399 if (!(v & F_MBMSGVALID)) {
400 t4_write_reg(adap, ctl_reg,
401 V_MBOWNER(X_MBOWNER_NONE));
406 * Retrieve the command reply and release the mailbox.
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
409 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
411 CH_DUMP_MBOX(adap, mbox, data_reg);
413 res = be64_to_cpu(cmd_rpl[0]);
/* An unsolicited FW_DEBUG_CMD reply is a firmware assertion. */
414 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
415 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
416 res = V_FW_CMD_RETVAL(EIO);
418 memcpy(rpl, cmd_rpl, size);
/* Firmware retval is returned negated per this driver's convention. */
419 return -G_FW_CMD_RETVAL((int)res);
424 * We timed out waiting for a reply to our mailbox command. Report
425 * the error and also check to see if the firmware reported any
428 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
429 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
430 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap);
/* Convenience wrapper: issue a mailbox command with the default firmware
 * command timeout (FW_CMD_MAX_TIMEOUT).
 */
437 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
438 void *rpl, bool sleep_ok)
440 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
441 sleep_ok, FW_CMD_MAX_TIMEOUT);
/* Dump the ECC error address and BIST status data for EDC @idx.
 * Supported on T5+ only; @idx must be 0 or 1.
 */
445 static int t4_edc_err_read(struct adapter *adap, int idx)
447 u32 edc_ecc_err_addr_reg;
448 u32 edc_bist_status_rdata_reg;
451 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
454 if (idx != 0 && idx != 1) {
455 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
459 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
460 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
463 "edc%d err addr 0x%x: 0x%x.\n",
464 idx, edc_ecc_err_addr_reg,
465 t4_read_reg(adap, edc_ecc_err_addr_reg));
467 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
468 edc_bist_status_rdata_reg,
469 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
470 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
471 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
472 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
473 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
474 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
475 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
476 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
477 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
483 * t4_mc_read - read from MC through backdoor accesses
485 * @idx: which MC to access
486 * @addr: address of first byte requested
487 * @data: 64 bytes of data containing the requested address
488 * @ecc: where to store the corresponding 64-bit ECC word
490 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
491 * that covers the requested address @addr. If @ecc is not %NULL it
492 * is assigned the 64-bit ECC word for the read data.
494 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
497 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
498 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
/* Pre-T5 chips have a single MC with fixed register addresses; later
 * chips index per-MC register copies via MC_REG().
 */
501 mc_bist_cmd_reg = A_MC_BIST_CMD;
502 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
503 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
504 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
505 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
507 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
508 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
509 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
510 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
512 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
/* Bail out if a BIST operation is already in progress. */
516 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
/* Issue a 64-byte-aligned, 64-byte BIST read and wait for completion. */
518 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
519 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
520 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
521 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
522 F_START_BIST | V_BIST_CMD_GAP(1));
523 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
527 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
/* Copy out the 16 data words (highest index first) and the ECC word. */
529 for (i = 15; i >= 0; i--)
530 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
532 *ecc = t4_read_reg64(adap, MC_DATA(16));
538 * t4_edc_read - read from EDC through backdoor accesses
540 * @idx: which EDC to access
541 * @addr: address of first byte requested
542 * @data: 64 bytes of data containing the requested address
543 * @ecc: where to store the corresponding 64-bit ECC word
545 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
546 * that covers the requested address @addr. If @ecc is not %NULL it
547 * is assigned the 64-bit ECC word for the read data.
549 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
552 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
553 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
/* Pre-T5: per-EDC registers are selected via EDC_REG(). */
556 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
557 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
558 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
559 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
561 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
565 * These macro are missing in t4_regs.h file.
566 * Added temporarily for testing.
568 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
569 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
570 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
571 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
572 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
573 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
575 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
/* Bail out if a BIST operation is already in progress. */
581 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
/* Issue a 64-byte-aligned, 64-byte BIST read and wait for completion. */
583 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
584 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
585 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
586 t4_write_reg(adap, edc_bist_cmd_reg,
587 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
588 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
592 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
/* Copy out the 16 data words (highest index first) and the ECC word. */
594 for (i = 15; i >= 0; i--)
595 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
597 *ecc = t4_read_reg64(adap, EDC_DATA(16));
603 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
605 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
606 * @addr: address within indicated memory type
607 * @len: amount of memory to read
608 * @buf: host memory buffer
610 * Reads an [almost] arbitrary memory region in the firmware: the
611 * firmware memory address, length and host buffer must be aligned on
612 * 32-bit boundaries. The memory is returned as a raw byte sequence from
613 * the firmware's memory. If this memory contains data structures which
614 * contain multi-byte integers, it's the caller's responsibility to
615 * perform appropriate byte order conversions.
617 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
620 u32 pos, start, end, offset;
624 * Argument sanity checks ...
626 if ((addr & 0x3) || (len & 0x3))
630 * The underlying EDC/MC read routines read 64 bytes at a time so we
631 * need to round down the start and round up the end. We'll start
632 * copying out of the first line at (addr - start) a word at a time.
634 start = addr & ~(64-1);
635 end = (addr + len + 64-1) & ~(64-1);
636 offset = (addr - start)/sizeof(__be32);
638 for (pos = start; pos < end; pos += 64, offset = 0) {
642 * Read the chip's memory block and bail if there's an error.
/* MC targets are relative to MEM_MC; everything else goes to EDC. */
644 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
645 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
647 ret = t4_edc_read(adap, mtype, pos, data, NULL);
652 * Copy the data into the caller's memory buffer.
654 while (offset < 16 && len > 0) {
655 *buf++ = data[offset++];
656 len -= sizeof(__be32);
664 * Return the specified PCI-E Configuration Space register from our Physical
665 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
666 * since we prefer to let the firmware own all of these registers, but if that
667 * fails we go for it directly ourselves.
669 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
673 * If fw_attach != 0, construct and send the Firmware LDST Command to
674 * retrieve the specified PCI-E Configuration Space register.
676 if (drv_fw_attach != 0) {
677 struct fw_ldst_cmd ldst_cmd;
680 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
681 ldst_cmd.op_to_addrspace =
682 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
685 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
686 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
687 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
688 ldst_cmd.u.pcie.ctrl_to_fn =
689 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
690 ldst_cmd.u.pcie.r = reg;
693 * If the LDST Command succeeds, return the result, otherwise
694 * fall through to reading it directly ourselves ...
696 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
699 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
701 CH_WARN(adap, "Firmware failed to return "
702 "Configuration Space register %d, err = %d\n",
707 * Read the desired Configuration Space register via the PCI-E
708 * Backdoor mechanism.
710 return t4_hw_pci_read_cfg4(adap, reg);
714 * t4_get_regs_len - return the size of the chips register set
715 * @adapter: the adapter
717 * Returns the size of the chip's BAR0 register space.
719 unsigned int t4_get_regs_len(struct adapter *adapter)
721 unsigned int chip_version = chip_id(adapter);
723 switch (chip_version) {
/* VFs use the (smaller) VF register map size; note T5 VFs share the
 * T4 VF map size here.
 */
725 if (adapter->flags & IS_VF)
726 return FW_T4VF_REGMAP_SIZE;
727 return T4_REGMAP_SIZE;
731 if (adapter->flags & IS_VF)
732 return FW_T4VF_REGMAP_SIZE;
733 return T5_REGMAP_SIZE;
737 "Unsupported chip version %d\n", chip_version);
742 * t4_get_regs - read chip registers into provided buffer
744 * @buf: register buffer
745 * @buf_size: size (in bytes) of register buffer
747 * If the provided register buffer isn't large enough for the chip's
748 * full register range, the register dump will be truncated to the
749 * register buffer's size.
751 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
/* Register ranges are stored as (first_reg, last_reg) pairs, one table
 * per chip generation, with separate (much smaller) tables for VFs.
 */
753 static const unsigned int t4_reg_ranges[] = {
1211 static const unsigned int t4vf_reg_ranges[] = {
1212 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1213 VF_MPS_REG(A_MPS_VF_CTL),
1214 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1215 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1216 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1217 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1218 FW_T4VF_MBDATA_BASE_ADDR,
1219 FW_T4VF_MBDATA_BASE_ADDR +
1220 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1223 static const unsigned int t5_reg_ranges[] = {
1998 static const unsigned int t5vf_reg_ranges[] = {
1999 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2000 VF_MPS_REG(A_MPS_VF_CTL),
2001 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2002 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2003 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2004 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2005 FW_T4VF_MBDATA_BASE_ADDR,
2006 FW_T4VF_MBDATA_BASE_ADDR +
2007 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2010 static const unsigned int t6_reg_ranges[] = {
2587 static const unsigned int t6vf_reg_ranges[] = {
2588 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2589 VF_MPS_REG(A_MPS_VF_CTL),
2590 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2591 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2592 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2593 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2594 FW_T6VF_MBDATA_BASE_ADDR,
2595 FW_T6VF_MBDATA_BASE_ADDR +
2596 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2599 u32 *buf_end = (u32 *)(buf + buf_size);
2600 const unsigned int *reg_ranges;
2601 int reg_ranges_size, range;
2602 unsigned int chip_version = chip_id(adap);
2605 * Select the right set of register ranges to dump depending on the
2606 * adapter chip type.
2608 switch (chip_version) {
2610 if (adap->flags & IS_VF) {
2611 reg_ranges = t4vf_reg_ranges;
2612 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2614 reg_ranges = t4_reg_ranges;
2615 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2620 if (adap->flags & IS_VF) {
2621 reg_ranges = t5vf_reg_ranges;
2622 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2624 reg_ranges = t5_reg_ranges;
2625 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2630 if (adap->flags & IS_VF) {
2631 reg_ranges = t6vf_reg_ranges;
2632 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2634 reg_ranges = t6_reg_ranges;
2635 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2641 "Unsupported chip version %d\n", chip_version);
2646 * Clear the register buffer and insert the appropriate register
2647 * values selected by the above register ranges.
2649 memset(buf, 0, buf_size);
/* Each iteration consumes one (first, last) pair from the table. */
2650 for (range = 0; range < reg_ranges_size; range += 2) {
2651 unsigned int reg = reg_ranges[range];
2652 unsigned int last_reg = reg_ranges[range + 1];
2653 u32 *bufp = (u32 *)(buf + reg);
2656 * Iterate across the register range filling in the register
2657 * buffer but don't write past the end of the register buffer.
2659 while (reg <= last_reg && bufp < buf_end) {
2660 *bufp++ = t4_read_reg(adap, reg);
2667 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2679 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2681 #define EEPROM_DELAY 10 /* 10us per poll spin */
2682 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2684 #define EEPROM_STAT_ADDR 0x7bfc
2685 #define VPD_BASE 0x400
2686 #define VPD_BASE_OLD 0
2687 #define VPD_LEN 1024
2688 #define VPD_INFO_FLD_HDR_SIZE 3
2689 #define CHELSIO_VPD_UNIQUE_ID 0x82
2692 * Small utility function to wait till any outstanding VPD Access is complete.
2693 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2694 * VPD Access in flight. This allows us to handle the problem of having a
2695 * previous VPD Access time out and prevent an attempt to inject a new VPD
2696 * Request before any in-flight VPD request has completed.
2698 static int t4_seeprom_wait(struct adapter *adapter)
2700 unsigned int base = adapter->params.pci.vpd_cap_addr;
2704 * If no VPD Access is in flight, we can just return success right
2707 if (!adapter->vpd_busy)
2711 * Poll the VPD Capability Address/Flag register waiting for it
2712 * to indicate that the operation is complete.
2714 max_poll = EEPROM_MAX_POLL;
2718 udelay(EEPROM_DELAY);
2719 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2722 * If the operation is complete, mark the VPD as no longer
2723 * busy and return success.
/* The F flag's "done" polarity differs for reads vs. writes, so we
 * compare against the vpd_flag recorded when the access was issued.
 */
2725 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2726 adapter->vpd_busy = 0;
2729 } while (--max_poll);
2732 * Failure! Note that we leave the VPD Busy status set in order to
2733 * avoid pushing a new VPD Access request into the VPD Capability till
2734 * the current operation eventually succeeds. It's a bug to issue a
2735 * new request when an existing request is in flight and will result
2736 * in corrupt hardware state.
2742 * t4_seeprom_read - read a serial EEPROM location
2743 * @adapter: adapter to read
2744 * @addr: EEPROM virtual address
2745 * @data: where to store the read data
2747 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2748 * VPD capability. Note that this function must be called with a virtual
2751 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2753 unsigned int base = adapter->params.pci.vpd_cap_addr;
2757 * VPD Accesses must always be 4-byte aligned!
2759 if (addr >= EEPROMVSIZE || (addr & 3))
2763 * Wait for any previous operation which may still be in flight to
2766 ret = t4_seeprom_wait(adapter);
2768 CH_ERR(adapter, "VPD still busy from previous operation\n");
2773 * Issue our new VPD Read request, mark the VPD as being busy and wait
2774 * for our request to complete. If it doesn't complete, note the
2775 * error and return it to our caller. Note that we do not reset the
/* For a read, completion is signalled by PCI_VPD_ADDR_F becoming set. */
2778 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2779 adapter->vpd_busy = 1;
2780 adapter->vpd_flag = PCI_VPD_ADDR_F;
2781 ret = t4_seeprom_wait(adapter);
2783 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2788 * Grab the returned data, swizzle it into our endianness and
2791 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2792 *data = le32_to_cpu(*data);
2797 * t4_seeprom_write - write a serial EEPROM location
2798 * @adapter: adapter to write
2799 * @addr: virtual EEPROM address
2800 * @data: value to write
2802 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2803 * VPD capability. Note that this function must be called with a virtual
2806 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2808 unsigned int base = adapter->params.pci.vpd_cap_addr;
2814 * VPD Accesses must always be 4-byte aligned!
2816 if (addr >= EEPROMVSIZE || (addr & 3))
2820 * Wait for any previous operation which may still be in flight to
2823 ret = t4_seeprom_wait(adapter);
2825 CH_ERR(adapter, "VPD still busy from previous operation\n");
2830 * Issue our new VPD Write request, mark the VPD as being busy and wait
2831 * for our request to complete. If it doesn't complete, note the
2832 * error and return it to our caller. Note that we do not reset the
/* For a write, completion is signalled by PCI_VPD_ADDR_F being cleared,
 * hence vpd_flag = 0 below.
 */
2835 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2837 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2838 (u16)addr | PCI_VPD_ADDR_F);
2839 adapter->vpd_busy = 1;
2840 adapter->vpd_flag = 0;
2841 ret = t4_seeprom_wait(adapter);
2843 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2848 * Reset PCI_VPD_DATA register after a transaction and wait for our
2849 * request to complete. If it doesn't complete, return error.
2851 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2852 max_poll = EEPROM_MAX_POLL;
/* Poll the EEPROM status word until the write-in-progress bit clears. */
2854 udelay(EEPROM_DELAY);
2855 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2856 } while ((stats_reg & 0x1) && --max_poll);
2860 /* Return success! */
2865 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2866 * @phys_addr: the physical EEPROM address
2867 * @fn: the PCI function number
2868 * @sz: size of function-specific area
2870 * Translate a physical EEPROM address to virtual. The first 1K is
2871 * accessed through virtual addresses starting at 31K, the rest is
2872 * accessed through virtual addresses starting at 0.
2874 * The mapping is as follows:
2875 * [0..1K) -> [31K..32K)
2876 * [1K..1K+A) -> [ES-A..ES)
2877 * [1K+A..ES) -> [0..ES-A-1K)
2879 * where A = @fn * @sz, and ES = EEPROM size.
2881 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* Mapping per the header comment: [0..1K) -> [31K..32K),
 * [1K..1K+A) -> [ES-A..ES), [1K+A..ES) -> [0..ES-A-1K),
 * where A = fn * sz and ES = EEPROM size.
 */
2884 if (phys_addr < 1024)
2885 return phys_addr + (31 << 10);
2886 if (phys_addr < 1024 + fn)
2887 return EEPROMSIZE - fn + phys_addr - 1024;
2888 if (phys_addr < EEPROMSIZE)
2889 return phys_addr - 1024 - fn;
2894 * t4_seeprom_wp - enable/disable EEPROM write protection
2895 * @adapter: the adapter
2896 * @enable: whether to enable or disable write protection
2898 * Enables or disables write protection on the serial EEPROM.
2900 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* 0xc sets the EEPROM's write-protect bits; 0 clears them. */
2902 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2906 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2907 * @v: Pointer to buffered vpd data structure
2908 * @kw: The keyword to search for
2910 * Returns the value of the information field keyword or
2911 * -ENOENT otherwise.
2913 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2916 unsigned int offset , len;
2917 const u8 *buf = (const u8 *)v;
2918 const u8 *vpdr_len = &v->vpdr_len[0];
2919 offset = sizeof(struct t4_vpd_hdr);
/* VPD-R length is stored little-endian in two bytes of the header. */
2920 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
/* Reject a VPD-R section that claims to extend past the VPD buffer. */
2922 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
/* Walk the info fields: 2-byte keyword + 1-byte length, then data. */
2926 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2927 if(memcmp(buf + i , kw , 2) == 0){
2928 i += VPD_INFO_FLD_HDR_SIZE;
2932 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@vpd: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
	const struct t4_vpd_hdr *v;
	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at a different offset; probe the canonical location first.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the whole VPD image into the caller's buffer, 4 bytes at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
	v = (const struct t4_vpd_hdr *)vpd;

/* Locate keyword @name in the VPD and store its value offset in @var;
 * logs an error if the keyword is absent. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \

	FIND_VPD_KW(i, "RV");
	/* Sum all VPD bytes through the RV field; an intact VPD sums to 0. */
	for (csum = 0; i >= 0; i--)
		"corrupted VPD EEPROM, actual csum %u\n", csum);

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");

	memcpy(p->id, v->id_data, ID_LEN);
	memcpy(p->ec, vpd + ec, EC_LEN);
	/* Each field's length byte sits at offset 2 of its 3-byte header. */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);
/* serial flash and firmware constants and flash config file constants */
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (standard SPI NOR flash command set) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
3035 * sf1_read - read data from the serial flash
3036 * @adapter: the adapter
3037 * @byte_cnt: number of bytes to read
3038 * @cont: whether another operation will be chained
3039 * @lock: whether to lock SF for PL access only
3040 * @valp: where to store the read data
3042 * Reads up to 4 bytes of data from the serial flash. The location of
3043 * the read needs to be specified prior to calling this by issuing the
3044 * appropriate commands to the serial flash.
3046 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3047 int lock, u32 *valp)
3051 if (!byte_cnt || byte_cnt > 4)
3053 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3055 t4_write_reg(adapter, A_SF_OP,
3056 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3057 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3059 *valp = t4_read_reg(adapter, A_SF_DATA);
3064 * sf1_write - write data to the serial flash
3065 * @adapter: the adapter
3066 * @byte_cnt: number of bytes to write
3067 * @cont: whether another operation will be chained
3068 * @lock: whether to lock SF for PL access only
3069 * @val: value to write
3071 * Writes up to 4 bytes of data to the serial flash. The location of
3072 * the write needs to be specified prior to calling this by issuing the
3073 * appropriate commands to the serial flash.
3075 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3078 if (!byte_cnt || byte_cnt > 4)
3080 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3082 t4_write_reg(adapter, A_SF_DATA, val);
3083 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3084 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3085 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3089 * flash_wait_op - wait for a flash operation to complete
3090 * @adapter: the adapter
3091 * @attempts: max number of polls of the status register
3092 * @delay: delay between polls in ms
3094 * Wait for a flash operation to complete by polling the status register.
3096 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3102 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3103 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3107 if (--attempts == 0)
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
	/* Reject reads past the end of flash or with an unaligned start. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))

	/* Opcode in the low byte, 24-bit flash address byte-swapped above it. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the 4-byte command, then prime the data pipeline. */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)

	for ( ; nwords; nwords--, data++) {
		/* Chain reads until the last word, which releases the lock. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		/* Store in BE so callers see bytes exactly as kept in flash. */
		*data = (__force __u32)(cpu_to_be32(*data));
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie within flash and within a single 256B page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)

	/* Opcode in the low byte, 24-bit flash address byte-swapped above it. */
	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then issue the page-program command. */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)

	/* Stream the payload up to 4 bytes at a time. */
	for (left = n; left; left -= c) {
		/* Pack up to 4 source bytes into one register word. */
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;
			val = cpu_to_be32(val);
		ret = sf1_write(adapter, c, c != left, 1, val);
	ret = flash_wait_op(adapter, 8, 1);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		"failed to correctly write the flash page at %#x\n",
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
3220 * t4_get_fw_version - read the firmware version
3221 * @adapter: the adapter
3222 * @vers: where to place the version
3224 * Reads the FW version from flash.
3226 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3228 return t4_read_flash(adapter, FLASH_FW_START +
3229 offsetof(struct fw_hdr, fw_ver), 1,
3234 * t4_get_bs_version - read the firmware bootstrap version
3235 * @adapter: the adapter
3236 * @vers: where to place the version
3238 * Reads the FW Bootstrap version from flash.
3240 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3242 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3243 offsetof(struct fw_hdr, fw_ver), 1,
3248 * t4_get_tp_version - read the TP microcode version
3249 * @adapter: the adapter
3250 * @vers: where to place the version
3252 * Reads the TP microcode version from flash.
3254 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3256 return t4_read_flash(adapter, FLASH_FW_START +
3257 offsetof(struct fw_hdr, tp_microcode_ver),
/**
 *	t4_get_exprom_version - return the Expansion ROM version (if any)
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Expansion ROM header from FLASH and returns the version
 *	number (if present) through the @vers return value pointer.  We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),

	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,

	hdr = (struct exprom_header *)exprom_header_buf;
	/* Option-ROM images begin with the standard 0x55 0xAA signature. */
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)

	/* Repackage the 4 ROM version bytes in Firmware Version Format. */
	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3299 * t4_get_scfg_version - return the Serial Configuration version
3300 * @adapter: the adapter
3301 * @vers: where to place the version
3303 * Reads the Serial Configuration Version via the Firmware interface
3304 * (thus this can only be called once we're ready to issue Firmware
3305 * commands). The format of the Serial Configuration version is
3306 * adapter specific. Returns 0 on success, an error on failure.
3308 * Note that early versions of the Firmware didn't include the ability
3309 * to retrieve the Serial Configuration version, so we zero-out the
3310 * return-value parameter in that case to avoid leaving it with
3313 * Also note that the Firmware will return its cached copy of the Serial
3314 * Initialization Revision ID, not the actual Revision ID as written in
3315 * the Serial EEPROM. This is only an issue if a new VPD has been written
3316 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3317 * it's best to defer calling this routine till after a FW_RESET_CMD has
3318 * been issued if the Host Driver will be performing a full adapter
3321 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3326 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3327 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3328 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3329 1, &scfgrev_param, vers);
3336 * t4_get_vpd_version - return the VPD version
3337 * @adapter: the adapter
3338 * @vers: where to place the version
3340 * Reads the VPD via the Firmware interface (thus this can only be called
3341 * once we're ready to issue Firmware commands). The format of the
3342 * VPD version is adapter specific. Returns 0 on success, an error on
3345 * Note that early versions of the Firmware didn't include the ability
3346 * to retrieve the VPD version, so we zero-out the return-value parameter
3347 * in that case to avoid leaving it with garbage in it.
3349 * Also note that the Firmware will return its cached copy of the VPD
3350 * Revision ID, not the actual Revision ID as written in the Serial
3351 * EEPROM. This is only an issue if a new VPD has been written and the
3352 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3353 * to defer calling this routine till after a FW_RESET_CMD has been issued
3354 * if the Host Driver will be performing a full adapter initialization.
3356 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3361 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3362 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3363 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3364 1, &vpdrev_param, vers);
/**
 *	t4_get_version_info - extract various chip/firmware version information
 *	@adapter: the adapter
 *
 *	Reads various chip/firmware version numbers and stores them into the
 *	adapter Adapter Parameters structure.  If any of the efforts fails
 *	the first failure will be returned, but all of the version numbers
 *	will still be attempted.
 */
int t4_get_version_info(struct adapter *adapter)

/* Run @__getvinfo and remember only the FIRST error encountered so far. */
#define FIRST_RET(__getvinfo) \
	int __ret = __getvinfo; \
	if (__ret && !ret) \

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)

	if (end >= adapter->params.sf_nsec)

	while (start <= end) {
		/* For each sector: enable writes, issue ERASE_SECTOR with the
		 * sector number in bits 8+, then poll until the part is idle. */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			"erase of flash sector %d failed, error %d\n",
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
3434 * t4_flash_cfg_addr - return the address of the flash configuration file
3435 * @adapter: the adapter
3437 * Return the address within the flash where the Firmware Configuration
3438 * File is stored, or an error if the device FLASH is too small to contain
3439 * a Firmware Configuration File.
3441 int t4_flash_cfg_addr(struct adapter *adapter)
3444 * If the device FLASH isn't large enough to hold a Firmware
3445 * Configuration File, return an error.
3447 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3450 return FLASH_CFG_START;
3454 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3455 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3456 * and emit an error message for mismatched firmware to save our caller the
3459 static int t4_fw_matches_chip(struct adapter *adap,
3460 const struct fw_hdr *hdr)
3463 * The expression below will return FALSE for any unsupported adapter
3464 * which will keep us "honest" in the future ...
3466 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3467 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3468 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3472 "FW image (%d) is not suitable for this adapter (%d)\n",
3473 hdr->chip, chip_id(adap));
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap images are flashed into their own dedicated region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;

	/* Sanity-check the image before touching the flash. */
		CH_ERR(adap, "FW image has no data\n");
		"FW image size not multiple of 512 bytes\n");
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		"FW image size differs from size in FW header\n");
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
	if (!t4_fw_matches_chip(adap, hdr))

	/* The 32-bit word sum of an intact image is 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		"corrupted firmware image, checksum %#x\n", csum);

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);

	/* Flash the remainder of the image one page at a time. */
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);

	/* Finally stamp the real version into the on-flash header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
		CH_ERR(adap, "firmware download failed, error %d\n",
/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op : the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	/* Build a FW_PARAMS_CMD WRITE for this PF/VF ... */
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			    V_FW_PARAMS_CMD_PFN(adap->pf) |
			    V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	/* ... setting the DEV.FWCACHE parameter to the requested op. */
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/*
 * Read out the CIM PIF logic analyzer: the request and response logs plus
 * their current write pointers.  Capture (LADBGEN) is disabled while the
 * LA is read and the previous debug configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
	u32 cfg, val, req, rsp;

	/* Freeze the LA before reading it. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	*pif_req_wrptr = req;
	*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entry, then read both LA data words. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
		/* Advance both read pointers, wrapping at the LA size. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	/* Restore the caller's debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Read out the CIM MA logic analyzer (request and response streams),
 * freezing capture while reading and restoring the debug config after.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
	/* Freeze the LA before reading it. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Select the entry, then read both LA data words. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
	/* Restore the caller's debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3653 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3657 for (i = 0; i < 8; i++) {
3658 u32 *p = la_buf + i;
3660 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3661 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3662 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3663 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3664 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3668 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3669 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
3670 FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	/* Translate the requested pause settings into FW capability bits. */
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY can't autonegotiate: request its fixed abilities. */
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Forced speed: request exactly the configured speed. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3720 * t4_restart_aneg - restart autonegotiation
3721 * @adap: the adapter
3722 * @mbox: mbox to use for the FW command
3723 * @port: the port id
3725 * Restarts autonegotiation for the selected port.
3727 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3729 struct fw_port_cmd c;
3731 memset(&c, 0, sizeof(c));
3732 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3733 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3734 V_FW_PORT_CMD_PORTID(port));
3736 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3738 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3739 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Platform-specific hook invoked when a matching interrupt condition fires. */
typedef void (*int_handler_t)(struct adapter *adap);

	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;	/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			/* Fatal conditions are reported at ALERT level. */
			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
					  status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);

	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },

	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },

	/* Conditions decoded from A_PCIE_INT_CAUSE on T4 parts. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,

	/* Conditions decoded from A_PCIE_INT_CAUSE on T5 and later parts. */
	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,

	/* T4 additionally exposes the UTL system-bus/port status registers. */
	fat = t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
	t4_handle_intr_status(adapter,
			      A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
			      pcie_port_intr_info) +
	t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
	fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
	t4_fatal_err(adapter);
3912 * TP interrupt handler.
3914 static void tp_intr_handler(struct adapter *adapter)
3916 static const struct intr_info tp_intr_info[] = {
3917 { 0x3fffffff, "TP parity error", -1, 1 },
3918 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3922 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3923 t4_fatal_err(adapter);
/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
	/* Conditions common to all chip generations. */
	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },

	/* Conditions that exist only on T4/T5. */
	static const struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
		  "SGE PCIe error for a DBP thread", -1, 1 },
		  "SGE Actual WRE packet is less than advertized length",

	/* CAUSE1/CAUSE2 hold pure parity errors; clear them directly. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,

	/* Report and clear any latched per-queue error indication. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);

	t4_fatal_err(adapter);
4008 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4009 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4010 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4011 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4014 * CIM interrupt handler.
4016 static void cim_intr_handler(struct adapter *adapter)
4018 static const struct intr_info cim_intr_info[] = {
4019 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4020 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4021 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4022 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4023 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4024 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4025 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4028 static const struct intr_info cim_upintr_info[] = {
4029 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4030 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4031 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4032 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4033 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4034 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4035 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4036 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4037 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4038 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4039 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4040 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4041 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4042 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4043 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4044 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4045 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4046 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4047 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4048 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4049 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4050 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4051 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4052 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4053 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4054 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4055 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4056 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4061 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
4062 t4_report_fw_error(adapter);
4064 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4066 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4069 t4_fatal_err(adapter);
4073 * ULP RX interrupt handler.
4075 static void ulprx_intr_handler(struct adapter *adapter)
4077 static const struct intr_info ulprx_intr_info[] = {
4078 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4079 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4080 { 0x7fffff, "ULPRX parity error", -1, 1 },
4084 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4085 t4_fatal_err(adapter);
4089 * ULP TX interrupt handler.
4091 static void ulptx_intr_handler(struct adapter *adapter)
4093 static const struct intr_info ulptx_intr_info[] = {
4094 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4096 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4098 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4100 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4102 { 0xfffffff, "ULPTX parity error", -1, 1 },
4106 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4107 t4_fatal_err(adapter);
4111 * PM TX interrupt handler.
4113 static void pmtx_intr_handler(struct adapter *adapter)
4115 static const struct intr_info pmtx_intr_info[] = {
4116 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4117 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4118 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4119 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4120 { 0xffffff0, "PMTX framing error", -1, 1 },
4121 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4122 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4124 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4125 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4129 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4130 t4_fatal_err(adapter);
4134 * PM RX interrupt handler.
4136 static void pmrx_intr_handler(struct adapter *adapter)
4138 static const struct intr_info pmrx_intr_info[] = {
4139 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4140 { 0x3ffff0, "PMRX framing error", -1, 1 },
4141 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4142 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4144 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4145 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4149 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4150 t4_fatal_err(adapter);
4154 * CPL switch interrupt handler.
4156 static void cplsw_intr_handler(struct adapter *adapter)
4158 static const struct intr_info cplsw_intr_info[] = {
4159 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4160 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4161 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4162 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4163 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4164 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4168 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4169 t4_fatal_err(adapter);
4173 * LE interrupt handler.
4175 static void le_intr_handler(struct adapter *adap)
4177 unsigned int chip_ver = chip_id(adap);
4178 static const struct intr_info le_intr_info[] = {
4179 { F_LIPMISS, "LE LIP miss", -1, 0 },
4180 { F_LIP0, "LE 0 LIP error", -1, 0 },
4181 { F_PARITYERR, "LE parity error", -1, 1 },
4182 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4183 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4187 static const struct intr_info t6_le_intr_info[] = {
4188 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4189 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4190 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4191 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4192 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4196 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4197 (chip_ver <= CHELSIO_T5) ?
4198 le_intr_info : t6_le_intr_info))
4203 * MPS interrupt handler.
4205 static void mps_intr_handler(struct adapter *adapter)
4207 static const struct intr_info mps_rx_intr_info[] = {
4208 { 0xffffff, "MPS Rx parity error", -1, 1 },
4211 static const struct intr_info mps_tx_intr_info[] = {
4212 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4213 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4214 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4216 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4218 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4219 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4220 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4223 static const struct intr_info mps_trc_intr_info[] = {
4224 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4225 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4227 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4230 static const struct intr_info mps_stat_sram_intr_info[] = {
4231 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4234 static const struct intr_info mps_stat_tx_intr_info[] = {
4235 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4238 static const struct intr_info mps_stat_rx_intr_info[] = {
4239 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4242 static const struct intr_info mps_cls_intr_info[] = {
4243 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4244 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4245 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4251 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4253 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4255 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4256 mps_trc_intr_info) +
4257 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4258 mps_stat_sram_intr_info) +
4259 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4260 mps_stat_tx_intr_info) +
4261 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4262 mps_stat_rx_intr_info) +
4263 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4266 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4267 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4269 t4_fatal_err(adapter);
4272 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4276 * EDC/MC interrupt handler.
4278 static void mem_intr_handler(struct adapter *adapter, int idx)
4280 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4282 unsigned int addr, cnt_addr, v;
4284 if (idx <= MEM_EDC1) {
4285 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4286 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4287 } else if (idx == MEM_MC) {
4288 if (is_t4(adapter)) {
4289 addr = A_MC_INT_CAUSE;
4290 cnt_addr = A_MC_ECC_STATUS;
4292 addr = A_MC_P_INT_CAUSE;
4293 cnt_addr = A_MC_P_ECC_STATUS;
4296 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4297 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4300 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4301 if (v & F_PERR_INT_CAUSE)
4302 CH_ALERT(adapter, "%s FIFO parity error\n",
4304 if (v & F_ECC_CE_INT_CAUSE) {
4305 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4307 t4_edc_err_read(adapter, idx);
4309 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4310 CH_WARN_RATELIMIT(adapter,
4311 "%u %s correctable ECC data error%s\n",
4312 cnt, name[idx], cnt > 1 ? "s" : "");
4314 if (v & F_ECC_UE_INT_CAUSE)
4316 "%s uncorrectable ECC data error\n", name[idx]);
4318 t4_write_reg(adapter, addr, v);
4319 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4320 t4_fatal_err(adapter);
4324 * MA interrupt handler.
4326 static void ma_intr_handler(struct adapter *adapter)
4328 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4330 if (status & F_MEM_PERR_INT_CAUSE) {
4332 "MA parity error, parity status %#x\n",
4333 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4336 "MA parity error, parity status %#x\n",
4337 t4_read_reg(adapter,
4338 A_MA_PARITY_ERROR_STATUS2));
4340 if (status & F_MEM_WRAP_INT_CAUSE) {
4341 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4342 CH_ALERT(adapter, "MA address wrap-around error by "
4343 "client %u to address %#x\n",
4344 G_MEM_WRAP_CLIENT_NUM(v),
4345 G_MEM_WRAP_ADDRESS(v) << 4);
4347 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4348 t4_fatal_err(adapter);
4352 * SMB interrupt handler.
4354 static void smb_intr_handler(struct adapter *adap)
4356 static const struct intr_info smb_intr_info[] = {
4357 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4358 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4359 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4363 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4368 * NC-SI interrupt handler.
4370 static void ncsi_intr_handler(struct adapter *adap)
4372 static const struct intr_info ncsi_intr_info[] = {
4373 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4374 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4375 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4376 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4380 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4385 * XGMAC interrupt handler.
4387 static void xgmac_intr_handler(struct adapter *adap, int port)
4389 u32 v, int_cause_reg;
4392 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4394 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4396 v = t4_read_reg(adap, int_cause_reg);
4398 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4402 if (v & F_TXFIFO_PRTY_ERR)
4403 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4405 if (v & F_RXFIFO_PRTY_ERR)
4406 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4408 t4_write_reg(adap, int_cause_reg, v);
4413 * PL interrupt handler.
4415 static void pl_intr_handler(struct adapter *adap)
4417 static const struct intr_info pl_intr_info[] = {
4418 { F_FATALPERR, "Fatal parity error", -1, 1 },
4419 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4423 static const struct intr_info t5_pl_intr_info[] = {
4424 { F_FATALPERR, "Fatal parity error", -1, 1 },
4428 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4430 pl_intr_info : t5_pl_intr_info))
4434 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4437 * t4_slow_intr_handler - control path interrupt handler
4438 * @adapter: the adapter
4440 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4441 * The designation 'slow' is because it involves register reads, while
4442 * data interrupts typically don't involve any MMIOs.
4444 int t4_slow_intr_handler(struct adapter *adapter)
4446 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4448 if (!(cause & GLBL_INTR_MASK))
4451 cim_intr_handler(adapter);
4453 mps_intr_handler(adapter);
4455 ncsi_intr_handler(adapter);
4457 pl_intr_handler(adapter);
4459 smb_intr_handler(adapter);
4461 xgmac_intr_handler(adapter, 0);
4463 xgmac_intr_handler(adapter, 1);
4465 xgmac_intr_handler(adapter, 2);
4467 xgmac_intr_handler(adapter, 3);
4469 pcie_intr_handler(adapter);
4471 mem_intr_handler(adapter, MEM_MC);
4472 if (is_t5(adapter) && (cause & F_MC1))
4473 mem_intr_handler(adapter, MEM_MC1);
4475 mem_intr_handler(adapter, MEM_EDC0);
4477 mem_intr_handler(adapter, MEM_EDC1);
4479 le_intr_handler(adapter);
4481 tp_intr_handler(adapter);
4483 ma_intr_handler(adapter);
4484 if (cause & F_PM_TX)
4485 pmtx_intr_handler(adapter);
4486 if (cause & F_PM_RX)
4487 pmrx_intr_handler(adapter);
4488 if (cause & F_ULP_RX)
4489 ulprx_intr_handler(adapter);
4490 if (cause & F_CPL_SWITCH)
4491 cplsw_intr_handler(adapter);
4493 sge_intr_handler(adapter);
4494 if (cause & F_ULP_TX)
4495 ulptx_intr_handler(adapter);
4497 /* Clear the interrupts just processed for which we are the master. */
4498 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4499 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4504 * t4_intr_enable - enable interrupts
4505 * @adapter: the adapter whose interrupts should be enabled
4507 * Enable PF-specific interrupts for the calling function and the top-level
4508 * interrupt concentrator for global interrupts. Interrupts are already
4509 * enabled at each module, here we just enable the roots of the interrupt
4512 * Note: this function should be called only when the driver manages
4513 * non PF-specific interrupts from the various HW modules. Only one PCI
4514 * function at a time should be doing this.
4516 void t4_intr_enable(struct adapter *adapter)
4519 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4520 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4521 ? G_SOURCEPF(whoami)
4522 : G_T6_SOURCEPF(whoami));
4524 if (chip_id(adapter) <= CHELSIO_T5)
4525 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4527 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4528 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4529 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4530 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4531 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4532 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4533 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4534 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4535 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4536 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4540 * t4_intr_disable - disable interrupts
4541 * @adapter: the adapter whose interrupts should be disabled
4543 * Disable interrupts. We only disable the top-level interrupt
4544 * concentrators. The caller must be a PCI function managing global
4547 void t4_intr_disable(struct adapter *adapter)
4549 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4550 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4551 ? G_SOURCEPF(whoami)
4552 : G_T6_SOURCEPF(whoami));
4554 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4555 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4559 * t4_intr_clear - clear all interrupts
4560 * @adapter: the adapter whose interrupts should be cleared
4562 * Clears all interrupts. The caller must be a PCI function managing
4563 * global interrupts.
4565 void t4_intr_clear(struct adapter *adapter)
4567 static const unsigned int cause_reg[] = {
4568 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4569 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4570 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4571 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4572 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4573 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4575 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4576 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4577 A_MPS_RX_PERR_INT_CAUSE,
4579 MYPF_REG(A_PL_PF_INT_CAUSE),
4586 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4587 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4589 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4590 A_MC_P_INT_CAUSE, 0xffffffff);
4592 if (is_t4(adapter)) {
4593 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4595 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4598 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4600 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4601 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4605 * hash_mac_addr - return the hash value of a MAC address
4606 * @addr: the 48-bit Ethernet MAC address
4608 * Hashes a MAC address according to the hash function used by HW inexact
4609 * (hash) address matching.
4611 static int hash_mac_addr(const u8 *addr)
4613 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4614 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4622 * t4_config_rss_range - configure a portion of the RSS mapping table
4623 * @adapter: the adapter
4624 * @mbox: mbox to use for the FW command
4625 * @viid: virtual interface whose RSS subtable is to be written
4626 * @start: start entry in the table to write
4627 * @n: how many table entries to write
4628 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4629 * @nrspq: number of values in @rspq
4631 * Programs the selected part of the VI's RSS mapping table with the
4632 * provided values. If @nrspq < @n the supplied values are used repeatedly
4633 * until the full table range is populated.
4635 * The caller must ensure the values in @rspq are in the range allowed for
4638 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4639 int start, int n, const u16 *rspq, unsigned int nrspq)
4642 const u16 *rsp = rspq;
4643 const u16 *rsp_end = rspq + nrspq;
4644 struct fw_rss_ind_tbl_cmd cmd;
4646 memset(&cmd, 0, sizeof(cmd));
4647 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4648 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4649 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4650 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4653 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4654 * Queue Identifiers. These Ingress Queue IDs are packed three to
4655 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4659 int nq = min(n, 32);
4661 __be32 *qp = &cmd.iq0_to_iq2;
4664 * Set up the firmware RSS command header to send the next
4665 * "nq" Ingress Queue IDs to the firmware.
4667 cmd.niqid = cpu_to_be16(nq);
4668 cmd.startidx = cpu_to_be16(start);
4671 * "nq" more done for the start of the next loop.
4677 * While there are still Ingress Queue IDs to stuff into the
4678 * current firmware RSS command, retrieve them from the
4679 * Ingress Queue ID array and insert them into the command.
4683 * Grab up to the next 3 Ingress Queue IDs (wrapping
4684 * around the Ingress Queue ID array if necessary) and
4685 * insert them into the firmware RSS command at the
4686 * current 3-tuple position within the commad.
4690 int nqbuf = min(3, nq);
4693 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4694 while (nqbuf && nq_packed < 32) {
4701 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4702 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4703 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4707 * Send this portion of the RRS table update to the firmware;
4708 * bail out on any errors.
4710 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4718 * t4_config_glbl_rss - configure the global RSS mode
4719 * @adapter: the adapter
4720 * @mbox: mbox to use for the FW command
4721 * @mode: global RSS mode
4722 * @flags: mode-specific flags
4724 * Sets the global RSS mode.
4726 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4729 struct fw_rss_glb_config_cmd c;
4731 memset(&c, 0, sizeof(c));
4732 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4733 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4734 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4735 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4736 c.u.manual.mode_pkd =
4737 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4738 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4739 c.u.basicvirtual.mode_keymode =
4740 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4741 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4744 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4748 * t4_config_vi_rss - configure per VI RSS settings
4749 * @adapter: the adapter
4750 * @mbox: mbox to use for the FW command
4753 * @defq: id of the default RSS queue for the VI.
4754 * @skeyidx: RSS secret key table index for non-global mode
4755 * @skey: RSS vf_scramble key for VI.
4757 * Configures VI-specific RSS properties.
4759 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4760 unsigned int flags, unsigned int defq, unsigned int skeyidx,
4763 struct fw_rss_vi_config_cmd c;
4765 memset(&c, 0, sizeof(c));
4766 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4767 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4768 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4769 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4770 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4771 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4772 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4773 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4774 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4776 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4779 /* Read an RSS table row */
4780 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4782 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4783 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4788 * t4_read_rss - read the contents of the RSS mapping table
4789 * @adapter: the adapter
4790 * @map: holds the contents of the RSS mapping table
4792 * Reads the contents of the RSS hash->queue mapping table.
4794 int t4_read_rss(struct adapter *adapter, u16 *map)
4799 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4800 ret = rd_rss_row(adapter, i, &val);
4803 *map++ = G_LKPTBLQUEUE0(val);
4804 *map++ = G_LKPTBLQUEUE1(val);
4810 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4811 * @adap: the adapter
4812 * @vals: where the indirect register values are stored/written
4813 * @nregs: how many indirect registers to read/write
4814 * @start_idx: index of first indirect register to read/write
4815 * @rw: Read (1) or Write (0)
4817 * Access TP PIO registers through LDST
4819 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4820 unsigned int start_index, unsigned int rw)
4823 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4824 struct fw_ldst_cmd c;
4826 for (i = 0 ; i < nregs; i++) {
4827 memset(&c, 0, sizeof(c));
4828 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4830 (rw ? F_FW_CMD_READ :
4832 V_FW_LDST_CMD_ADDRSPACE(cmd));
4833 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4835 c.u.addrval.addr = cpu_to_be32(start_index + i);
4836 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4837 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4840 vals[i] = be32_to_cpu(c.u.addrval.val);
4846 * t4_read_rss_key - read the global RSS key
4847 * @adap: the adapter
4848 * @key: 10-entry array holding the 320-bit RSS key
4850 * Reads the global 320-bit RSS key.
4852 void t4_read_rss_key(struct adapter *adap, u32 *key)
4854 if (t4_use_ldst(adap))
4855 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
4857 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4858 A_TP_RSS_SECRET_KEY0);
4862 * t4_write_rss_key - program one of the RSS keys
4863 * @adap: the adapter
4864 * @key: 10-entry array holding the 320-bit RSS key
4865 * @idx: which RSS key to write
4867 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4868 * 0..15 the corresponding entry in the RSS key table is written,
4869 * otherwise the global RSS key is written.
4871 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4873 u8 rss_key_addr_cnt = 16;
4874 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4877 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4878 * allows access to key addresses 16-63 by using KeyWrAddrX
4879 * as index[5:4](upper 2) into key table
4881 if ((chip_id(adap) > CHELSIO_T5) &&
4882 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4883 rss_key_addr_cnt = 32;
4885 if (t4_use_ldst(adap))
4886 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4888 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4889 A_TP_RSS_SECRET_KEY0);
4891 if (idx >= 0 && idx < rss_key_addr_cnt) {
4892 if (rss_key_addr_cnt > 16)
4893 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4894 vrt | V_KEYWRADDRX(idx >> 4) |
4895 V_T6_VFWRADDR(idx) | F_KEYWREN);
4897 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4898 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
4903 * t4_read_rss_pf_config - read PF RSS Configuration Table
4904 * @adapter: the adapter
4905 * @index: the entry in the PF RSS table to read
4906 * @valp: where to store the returned value
4908 * Reads the PF RSS Configuration Table at the specified index and returns
4909 * the value found there.
4911 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4914 if (t4_use_ldst(adapter))
4915 t4_fw_tp_pio_rw(adapter, valp, 1,
4916 A_TP_RSS_PF0_CONFIG + index, 1);
4918 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4919 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4923 * t4_write_rss_pf_config - write PF RSS Configuration Table
4924 * @adapter: the adapter
4925 * @index: the entry in the VF RSS table to read
4926 * @val: the value to store
4928 * Writes the PF RSS Configuration Table at the specified index with the
4931 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4934 if (t4_use_ldst(adapter))
4935 t4_fw_tp_pio_rw(adapter, &val, 1,
4936 A_TP_RSS_PF0_CONFIG + index, 0);
4938 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4939 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4943 * t4_read_rss_vf_config - read VF RSS Configuration Table
4944 * @adapter: the adapter
4945 * @index: the entry in the VF RSS table to read
4946 * @vfl: where to store the returned VFL
4947 * @vfh: where to store the returned VFH
4949 * Reads the VF RSS Configuration Table at the specified index and returns
4950 * the (VFL, VFH) values found there.
4952 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4955 u32 vrt, mask, data;
4957 if (chip_id(adapter) <= CHELSIO_T5) {
4958 mask = V_VFWRADDR(M_VFWRADDR);
4959 data = V_VFWRADDR(index);
4961 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4962 data = V_T6_VFWRADDR(index);
4965 * Request that the index'th VF Table values be read into VFL/VFH.
4967 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4968 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4969 vrt |= data | F_VFRDEN;
4970 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4973 * Grab the VFL/VFH values ...
4975 if (t4_use_ldst(adapter)) {
4976 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
4977 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
4979 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4980 vfl, 1, A_TP_RSS_VFL_CONFIG);
4981 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4982 vfh, 1, A_TP_RSS_VFH_CONFIG);
4987 * t4_write_rss_vf_config - write VF RSS Configuration Table
4989 * @adapter: the adapter
4990 * @index: the entry in the VF RSS table to write
4991 * @vfl: the VFL to store
4992 * @vfh: the VFH to store
4994 * Writes the VF RSS Configuration Table at the specified index with the
4995 * specified (VFL, VFH) values.
4997 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5000 u32 vrt, mask, data;
5002 if (chip_id(adapter) <= CHELSIO_T5) {
5003 mask = V_VFWRADDR(M_VFWRADDR);
5004 data = V_VFWRADDR(index);
5006 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5007 data = V_T6_VFWRADDR(index);
5011 * Load up VFL/VFH with the values to be written ...
5013 if (t4_use_ldst(adapter)) {
5014 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
5015 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
5017 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5018 &vfl, 1, A_TP_RSS_VFL_CONFIG);
5019 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5020 &vfh, 1, A_TP_RSS_VFH_CONFIG);
5024 * Write the VFL/VFH into the VF Table at index'th location.
5026 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5027 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5028 vrt |= data | F_VFRDEN;
5029 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5033 * t4_read_rss_pf_map - read PF RSS Map
5034 * @adapter: the adapter
5036 * Reads the PF RSS Map register and returns its value.
5038 u32 t4_read_rss_pf_map(struct adapter *adapter)
5042 if (t4_use_ldst(adapter))
5043 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
5045 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5046 &pfmap, 1, A_TP_RSS_PF_MAP);
5051 * t4_write_rss_pf_map - write PF RSS Map
5052 * @adapter: the adapter
5053 * @pfmap: PF RSS Map value
5055 * Writes the specified value to the PF RSS Map register.
5057 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
5059 if (t4_use_ldst(adapter))
5060 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
5062 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5063 &pfmap, 1, A_TP_RSS_PF_MAP);
5067 * t4_read_rss_pf_mask - read PF RSS Mask
5068 * @adapter: the adapter
5070 * Reads the PF RSS Mask register and returns its value.
5072 u32 t4_read_rss_pf_mask(struct adapter *adapter)
5076 if (t4_use_ldst(adapter))
5077 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
5079 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5080 &pfmask, 1, A_TP_RSS_PF_MSK);
5085 * t4_write_rss_pf_mask - write PF RSS Mask
5086 * @adapter: the adapter
5087 * @pfmask: PF RSS Mask value
5089 * Writes the specified value to the PF RSS Mask register.
5091 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
5093 if (t4_use_ldst(adapter))
5094 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
5096 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5097 &pfmask, 1, A_TP_RSS_PF_MSK);
5101 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5102 * @adap: the adapter
5103 * @v4: holds the TCP/IP counter values
5104 * @v6: holds the TCP/IPv6 counter values
5106 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5107 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5109 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5110 struct tp_tcp_stats *v6)
5112 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5114 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5115 #define STAT(x) val[STAT_IDX(x)]
5116 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5119 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5120 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
5121 v4->tcp_out_rsts = STAT(OUT_RST);
5122 v4->tcp_in_segs = STAT64(IN_SEG);
5123 v4->tcp_out_segs = STAT64(OUT_SEG);
5124 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5127 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5128 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
5129 v6->tcp_out_rsts = STAT(OUT_RST);
5130 v6->tcp_in_segs = STAT64(IN_SEG);
5131 v6->tcp_out_segs = STAT64(OUT_SEG);
5132 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5140 * t4_tp_get_err_stats - read TP's error MIB counters
5141 * @adap: the adapter
5142 * @st: holds the counter values
5144 * Returns the values of TP's error counters.
5146 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
5148 int nchan = adap->chip_params->nchan;
5150 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5151 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
5152 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5153 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
5154 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5155 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
5156 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5157 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
5158 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5159 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
5160 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5161 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
5162 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5163 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
5164 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5165 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
5167 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5168 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
5172 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5173 * @adap: the adapter
5174 * @st: holds the counter values
5176 * Returns the values of TP's proxy counters.
5178 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
5180 int nchan = adap->chip_params->nchan;
5182 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
5183 nchan, A_TP_MIB_TNL_LPBK_0);
5187 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5188 * @adap: the adapter
5189 * @st: holds the counter values
5191 * Returns the values of TP's CPL counters.
5193 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
5195 int nchan = adap->chip_params->nchan;
5197 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
5198 nchan, A_TP_MIB_CPL_IN_REQ_0);
5199 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
5200 nchan, A_TP_MIB_CPL_OUT_RSP_0);
5204 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5205 * @adap: the adapter
5206 * @st: holds the counter values
5208 * Returns the values of TP's RDMA counters.
5210 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5212 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5213 2, A_TP_MIB_RQE_DFR_PKT);
5217 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5218 * @adap: the adapter
5219 * @idx: the port index
5220 * @st: holds the counter values
5222 * Returns the values of TP's FCoE counters for the selected port.
5224 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5225 struct tp_fcoe_stats *st)
5229 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5230 1, A_TP_MIB_FCOE_DDP_0 + idx);
5231 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5232 1, A_TP_MIB_FCOE_DROP_0 + idx);
5233 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5234 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
5235 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5239 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5240 * @adap: the adapter
5241 * @st: holds the counter values
5243 * Returns the values of TP's counters for non-TCP directly-placed packets.
5245 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5249 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5251 st->frames = val[0];
5253 st->octets = ((u64)val[2] << 32) | val[3];
5257 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5258 * @adap: the adapter
5259 * @mtus: where to store the MTU values
5260 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5262 * Reads the HW path MTU table.
5264 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5269 for (i = 0; i < NMTUS; ++i) {
5270 t4_write_reg(adap, A_TP_MTU_TABLE,
5271 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5272 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5273 mtus[i] = G_MTUVALUE(v);
5275 mtu_log[i] = G_MTUWIDTH(v);
5280 * t4_read_cong_tbl - reads the congestion control table
5281 * @adap: the adapter
5282 * @incr: where to store the alpha values
5284 * Reads the additive increments programmed into the HW congestion
5287 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5289 unsigned int mtu, w;
5291 for (mtu = 0; mtu < NMTUS; ++mtu)
5292 for (w = 0; w < NCCTRL_WIN; ++w) {
5293 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5294 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5295 incr[mtu][w] = (u16)t4_read_reg(adap,
5296 A_TP_CCTRL_TABLE) & 0x1fff;
5301 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5302 * @adap: the adapter
5303 * @addr: the indirect TP register address
5304 * @mask: specifies the field within the register to modify
5305 * @val: new value for the field
5307 * Sets a field of an indirect TP register to the given value.
5309 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5310 unsigned int mask, unsigned int val)
5312 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5313 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5314 t4_write_reg(adap, A_TP_PIO_DATA, val);
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Both arrays have
 *	NMTUS (32) entries; the values ramp up with the entry index.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
5361 /* The minimum additive increment value for the congestion control table */
5362 #define CC_MIN_INCR 2U
5365 * t4_load_mtus - write the MTU and congestion control HW tables
5366 * @adap: the adapter
5367 * @mtus: the values for the MTU table
5368 * @alpha: the values for the congestion control alpha parameter
5369 * @beta: the values for the congestion control beta parameter
5371 * Write the HW MTU table with the supplied MTUs and the high-speed
5372 * congestion control table with the supplied alpha, beta, and MTUs.
5373 * We write the two tables together because the additive increments
5374 * depend on the MTUs.
5376 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5377 const unsigned short *alpha, const unsigned short *beta)
5379 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5380 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5381 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5382 28672, 40960, 57344, 81920, 114688, 163840, 229376
5387 for (i = 0; i < NMTUS; ++i) {
5388 unsigned int mtu = mtus[i];
5389 unsigned int log2 = fls(mtu);
5391 if (!(mtu & ((1 << log2) >> 2))) /* round */
5393 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5394 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5396 for (w = 0; w < NCCTRL_WIN; ++w) {
5399 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5402 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5403 (w << 16) | (beta[w] << 13) | inc);
5409 * t4_set_pace_tbl - set the pace table
5410 * @adap: the adapter
5411 * @pace_vals: the pace values in microseconds
5412 * @start: index of the first entry in the HW pace table to set
5413 * @n: how many entries to set
5415 * Sets (a subset of the) HW pace table.
5417 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5418 unsigned int start, unsigned int n)
5420 unsigned int vals[NTX_SCHED], i;
5421 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5426 /* convert values from us to dack ticks, rounding to closest value */
5427 for (i = 0; i < n; i++, pace_vals++) {
5428 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5429 if (vals[i] > 0x7ff)
5431 if (*pace_vals && vals[i] == 0)
5434 for (i = 0; i < n; i++, start++)
5435 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5440 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5441 * @adap: the adapter
5442 * @kbps: target rate in Kbps
5443 * @sched: the scheduler index
5445 * Configure a Tx HW scheduler for the target rate.
5447 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5449 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5450 unsigned int clk = adap->params.vpd.cclk * 1000;
5451 unsigned int selected_cpt = 0, selected_bpt = 0;
5454 kbps *= 125; /* -> bytes */
5455 for (cpt = 1; cpt <= 255; cpt++) {
5457 bpt = (kbps + tps / 2) / tps;
5458 if (bpt > 0 && bpt <= 255) {
5460 delta = v >= kbps ? v - kbps : kbps - v;
5461 if (delta < mindelta) {
5466 } else if (selected_cpt)
5472 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5473 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5474 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5476 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5478 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5479 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5484 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5485 * @adap: the adapter
5486 * @sched: the scheduler index
5487 * @ipg: the interpacket delay in tenths of nanoseconds
5489 * Set the interpacket delay for a HW packet rate scheduler.
5491 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5493 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5495 /* convert ipg to nearest number of core clocks */
5496 ipg *= core_ticks_per_usec(adap);
5497 ipg = (ipg + 5000) / 10000;
5498 if (ipg > M_TXTIMERSEPQ0)
5501 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5502 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5504 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5506 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5507 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5508 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5513 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5514 * clocks. The formula is
5516 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5518 * which is equivalent to
5520 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5522 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5524 u64 v = bytes256 * adap->params.vpd.cclk;
5526 return v * 62 + v / 2;
5530 * t4_get_chan_txrate - get the current per channel Tx rates
5531 * @adap: the adapter
5532 * @nic_rate: rates for NIC traffic
5533 * @ofld_rate: rates for offloaded traffic
5535 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5538 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5542 v = t4_read_reg(adap, A_TP_TX_TRATE);
5543 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5544 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5545 if (adap->chip_params->nchan > 2) {
5546 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5547 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5550 v = t4_read_reg(adap, A_TP_TX_ORATE);
5551 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5552 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5553 if (adap->chip_params->nchan > 2) {
5554 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5555 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5560 * t4_set_trace_filter - configure one of the tracing filters
5561 * @adap: the adapter
5562 * @tp: the desired trace filter parameters
5563 * @idx: which filter to configure
5564 * @enable: whether to enable or disable the filter
5566 * Configures one of the tracing filters available in HW. If @tp is %NULL
5567 * it indicates that the filter is already written in the register and it
5568 * just needs to be enabled or disabled.
5570 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5571 int idx, int enable)
5573 int i, ofst = idx * 4;
5574 u32 data_reg, mask_reg, cfg;
5575 u32 multitrc = F_TRCMULTIFILTER;
5576 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5578 if (idx < 0 || idx >= NTRACE)
5581 if (tp == NULL || !enable) {
5582 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5588 * TODO - After T4 data book is updated, specify the exact
5591 * See T4 data book - MPS section for a complete description
5592 * of the below if..else handling of A_MPS_TRC_CFG register
5595 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5596 if (cfg & F_TRCMULTIFILTER) {
5598 * If multiple tracers are enabled, then maximum
5599 * capture size is 2.5KB (FIFO size of a single channel)
5600 * minus 2 flits for CPL_TRACE_PKT header.
5602 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5606 * If multiple tracers are disabled, to avoid deadlocks
5607 * maximum packet capture size of 9600 bytes is recommended.
5608 * Also in this mode, only trace0 can be enabled and running.
5611 if (tp->snap_len > 9600 || idx)
5615 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5616 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5617 tp->min_len > M_TFMINPKTSIZE)
5620 /* stop the tracer we'll be changing */
5621 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5623 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5624 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5625 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5627 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5628 t4_write_reg(adap, data_reg, tp->data[i]);
5629 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5631 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5632 V_TFCAPTUREMAX(tp->snap_len) |
5633 V_TFMINPKTSIZE(tp->min_len));
5634 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5635 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5637 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5638 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5644 * t4_get_trace_filter - query one of the tracing filters
5645 * @adap: the adapter
5646 * @tp: the current trace filter parameters
5647 * @idx: which trace filter to query
5648 * @enabled: non-zero if the filter is enabled
5650 * Returns the current settings of one of the HW tracing filters.
5652 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5656 int i, ofst = idx * 4;
5657 u32 data_reg, mask_reg;
5659 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5660 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5663 *enabled = !!(ctla & F_TFEN);
5664 tp->port = G_TFPORT(ctla);
5665 tp->invert = !!(ctla & F_TFINVERTMATCH);
5667 *enabled = !!(ctla & F_T5_TFEN);
5668 tp->port = G_T5_TFPORT(ctla);
5669 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5671 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5672 tp->min_len = G_TFMINPKTSIZE(ctlb);
5673 tp->skip_ofst = G_TFOFFSET(ctla);
5674 tp->skip_len = G_TFLENGTH(ctla);
5676 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5677 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5678 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5680 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5681 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5682 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5687 * t4_pmtx_get_stats - returns the HW stats from PMTX
5688 * @adap: the adapter
5689 * @cnt: where to store the count statistics
5690 * @cycles: where to store the cycle statistics
5692 * Returns performance statistics from PMTX.
5694 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5699 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5700 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5701 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5703 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5705 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5706 A_PM_TX_DBG_DATA, data, 2,
5707 A_PM_TX_DBG_STAT_MSB);
5708 cycles[i] = (((u64)data[0] << 32) | data[1]);
5714 * t4_pmrx_get_stats - returns the HW stats from PMRX
5715 * @adap: the adapter
5716 * @cnt: where to store the count statistics
5717 * @cycles: where to store the cycle statistics
5719 * Returns performance statistics from PMRX.
5721 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5726 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5727 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5728 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5730 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5732 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5733 A_PM_RX_DBG_DATA, data, 2,
5734 A_PM_RX_DBG_STAT_MSB);
5735 cycles[i] = (((u64)data[0] << 32) | data[1]);
5741 * t4_get_mps_bg_map - return the buffer groups associated with a port
5742 * @adap: the adapter
5743 * @idx: the port index
5745 * Returns a bitmap indicating which MPS buffer groups are associated
5746 * with the given port. Bit i is set if buffer group i is used by the
5749 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5751 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5754 return idx == 0 ? 0xf : 0;
5755 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5756 return idx < 2 ? (3 << (2 * idx)) : 0;
5761 * t4_get_port_type_description - return Port Type string description
5762 * @port_type: firmware Port Type enumeration
5764 const char *t4_get_port_type_description(enum fw_port_type port_type)
5766 static const char *const port_type_description[] = {
5792 if (port_type < ARRAY_SIZE(port_type_description))
5793 return port_type_description[port_type];
5798 * t4_get_port_stats_offset - collect port stats relative to a previous
5800 * @adap: The adapter
5802 * @stats: Current stats to fill
5803 * @offset: Previous stats snapshot
5805 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5806 struct port_stats *stats,
5807 struct port_stats *offset)
5812 t4_get_port_stats(adap, idx, stats);
5813 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5814 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5820 * t4_get_port_stats - collect port statistics
5821 * @adap: the adapter
5822 * @idx: the port index
5823 * @p: the stats structure to fill
5825 * Collect statistics related to the given port from HW.
5827 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5829 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5832 #define GET_STAT(name) \
5833 t4_read_reg64(adap, \
5834 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5835 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5836 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5838 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5840 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5841 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5842 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5843 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5844 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5845 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5846 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5847 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5848 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5849 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5850 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5851 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5852 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5853 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5854 p->tx_drop = GET_STAT(TX_PORT_DROP);
5855 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5856 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5857 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5858 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5859 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5860 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5861 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5862 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
5864 if (stat_ctl & F_COUNTPAUSESTATTX) {
5865 p->tx_frames -= p->tx_pause;
5866 p->tx_octets -= p->tx_pause * 64;
5867 p->tx_mcast_frames -= p->tx_pause;
5870 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5871 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5872 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5873 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5874 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5875 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5876 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5877 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5878 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5879 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5880 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5881 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5882 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5883 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5884 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5885 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5886 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5887 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5888 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5889 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5890 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5891 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5892 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5893 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5894 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5895 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5896 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
5898 if (stat_ctl & F_COUNTPAUSESTATRX) {
5899 p->rx_frames -= p->rx_pause;
5900 p->rx_octets -= p->rx_pause * 64;
5901 p->rx_mcast_frames -= p->rx_pause;
5904 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5905 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5906 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5907 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5908 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5909 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5910 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5911 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5918 * t4_get_lb_stats - collect loopback port statistics
5919 * @adap: the adapter
5920 * @idx: the loopback port index
5921 * @p: the stats structure to fill
5923 * Return HW statistics for the given loopback port.
5925 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5927 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5929 #define GET_STAT(name) \
5930 t4_read_reg64(adap, \
5932 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5933 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5934 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5936 p->octets = GET_STAT(BYTES);
5937 p->frames = GET_STAT(FRAMES);
5938 p->bcast_frames = GET_STAT(BCAST);
5939 p->mcast_frames = GET_STAT(MCAST);
5940 p->ucast_frames = GET_STAT(UCAST);
5941 p->error_frames = GET_STAT(ERROR);
5943 p->frames_64 = GET_STAT(64B);
5944 p->frames_65_127 = GET_STAT(65B_127B);
5945 p->frames_128_255 = GET_STAT(128B_255B);
5946 p->frames_256_511 = GET_STAT(256B_511B);
5947 p->frames_512_1023 = GET_STAT(512B_1023B);
5948 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5949 p->frames_1519_max = GET_STAT(1519B_MAX);
5950 p->drop = GET_STAT(DROP_FRAMES);
5952 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5953 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5954 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5955 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5956 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5957 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5958 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5959 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5966 * t4_wol_magic_enable - enable/disable magic packet WoL
5967 * @adap: the adapter
5968 * @port: the physical port index
5969 * @addr: MAC address expected in magic packets, %NULL to disable
5971 * Enables/disables magic packet wake-on-LAN for the selected port.
5973 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5976 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
5979 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5980 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5981 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5983 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5984 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5985 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5989 t4_write_reg(adap, mag_id_reg_l,
5990 (addr[2] << 24) | (addr[3] << 16) |
5991 (addr[4] << 8) | addr[5]);
5992 t4_write_reg(adap, mag_id_reg_h,
5993 (addr[0] << 8) | addr[1]);
5995 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5996 V_MAGICEN(addr != NULL));
6000 * t4_wol_pat_enable - enable/disable pattern-based WoL
6001 * @adap: the adapter
6002 * @port: the physical port index
6003 * @map: bitmap of which HW pattern filters to set
6004 * @mask0: byte mask for bytes 0-63 of a packet
6005 * @mask1: byte mask for bytes 64-127 of a packet
6006 * @crc: Ethernet CRC for selected bytes
6007 * @enable: enable/disable switch
6009 * Sets the pattern filters indicated in @map to mask out the bytes
6010 * specified in @mask0/@mask1 in received packets and compare the CRC of
6011 * the resulting packet against @crc. If @enable is %true pattern-based
6012 * WoL is enabled, otherwise disabled.
6014 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6015 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6021 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6023 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6026 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6032 #define EPIO_REG(name) \
6033 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6034 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6036 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6037 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6038 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6040 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6044 /* write byte masks */
6045 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6046 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6047 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6048 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6052 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6053 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6054 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6055 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6060 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6064 /* t4_mk_filtdelwr - create a delete filter WR
6065 * @ftid: the filter ID
6066 * @wr: the filter work request to populate
6067 * @qid: ingress queue to receive the delete notification
6069 * Creates a filter work request to delete the supplied filter. If @qid is
6070 * negative the delete notification is suppressed.
6072 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6074 memset(wr, 0, sizeof(*wr));
6075 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6076 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6077 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6078 V_FW_FILTER_WR_NOREPLY(qid < 0));
6079 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6081 wr->rx_chan_rx_rpl_iq =
6082 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6085 #define INIT_CMD(var, cmd, rd_wr) do { \
6086 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
6087 F_FW_CMD_REQUEST | \
6088 F_FW_CMD_##rd_wr); \
6089 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6092 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6096 struct fw_ldst_cmd c;
6098 memset(&c, 0, sizeof(c));
6099 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6100 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6104 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6105 c.u.addrval.addr = cpu_to_be32(addr);
6106 c.u.addrval.val = cpu_to_be32(val);
6108 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6112 * t4_mdio_rd - read a PHY register through MDIO
6113 * @adap: the adapter
6114 * @mbox: mailbox to use for the FW command
6115 * @phy_addr: the PHY address
6116 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6117 * @reg: the register to read
6118 * @valp: where to store the value
6120 * Issues a FW command through the given mailbox to read a PHY register.
6122 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6123 unsigned int mmd, unsigned int reg, unsigned int *valp)
6127 struct fw_ldst_cmd c;
6129 memset(&c, 0, sizeof(c));
6130 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6131 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6132 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6134 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6135 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6136 V_FW_LDST_CMD_MMD(mmd));
6137 c.u.mdio.raddr = cpu_to_be16(reg);
6139 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6141 *valp = be16_to_cpu(c.u.mdio.rval);
6146 * t4_mdio_wr - write a PHY register through MDIO
6147 * @adap: the adapter
6148 * @mbox: mailbox to use for the FW command
6149 * @phy_addr: the PHY address
6150 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6151 * @reg: the register to write
6152 * @valp: value to write
6154 * Issues a FW command through the given mailbox to write a PHY register.
6156 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6157 unsigned int mmd, unsigned int reg, unsigned int val)
6160 struct fw_ldst_cmd c;
6162 memset(&c, 0, sizeof(c));
6163 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6164 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6165 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6167 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6168 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6169 V_FW_LDST_CMD_MMD(mmd));
6170 c.u.mdio.raddr = cpu_to_be16(reg);
6171 c.u.mdio.rval = cpu_to_be16(val);
6173 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6178 * t4_sge_decode_idma_state - decode the idma state
6179 * @adap: the adapter
6180 * @state: the state idma is stuck in
6182 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6184 static const char * const t4_decode[] = {
6186 "IDMA_PUSH_MORE_CPL_FIFO",
6187 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6189 "IDMA_PHYSADDR_SEND_PCIEHDR",
6190 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6191 "IDMA_PHYSADDR_SEND_PAYLOAD",
6192 "IDMA_SEND_FIFO_TO_IMSG",
6193 "IDMA_FL_REQ_DATA_FL_PREP",
6194 "IDMA_FL_REQ_DATA_FL",
6196 "IDMA_FL_H_REQ_HEADER_FL",
6197 "IDMA_FL_H_SEND_PCIEHDR",
6198 "IDMA_FL_H_PUSH_CPL_FIFO",
6199 "IDMA_FL_H_SEND_CPL",
6200 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6201 "IDMA_FL_H_SEND_IP_HDR",
6202 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6203 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6204 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6205 "IDMA_FL_D_SEND_PCIEHDR",
6206 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6207 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6208 "IDMA_FL_SEND_PCIEHDR",
6209 "IDMA_FL_PUSH_CPL_FIFO",
6211 "IDMA_FL_SEND_PAYLOAD_FIRST",
6212 "IDMA_FL_SEND_PAYLOAD",
6213 "IDMA_FL_REQ_NEXT_DATA_FL",
6214 "IDMA_FL_SEND_NEXT_PCIEHDR",
6215 "IDMA_FL_SEND_PADDING",
6216 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6217 "IDMA_FL_SEND_FIFO_TO_IMSG",
6218 "IDMA_FL_REQ_DATAFL_DONE",
6219 "IDMA_FL_REQ_HEADERFL_DONE",
6221 static const char * const t5_decode[] = {
6224 "IDMA_PUSH_MORE_CPL_FIFO",
6225 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6226 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6227 "IDMA_PHYSADDR_SEND_PCIEHDR",
6228 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6229 "IDMA_PHYSADDR_SEND_PAYLOAD",
6230 "IDMA_SEND_FIFO_TO_IMSG",
6231 "IDMA_FL_REQ_DATA_FL",
6233 "IDMA_FL_DROP_SEND_INC",
6234 "IDMA_FL_H_REQ_HEADER_FL",
6235 "IDMA_FL_H_SEND_PCIEHDR",
6236 "IDMA_FL_H_PUSH_CPL_FIFO",
6237 "IDMA_FL_H_SEND_CPL",
6238 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6239 "IDMA_FL_H_SEND_IP_HDR",
6240 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6241 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6242 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6243 "IDMA_FL_D_SEND_PCIEHDR",
6244 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6245 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6246 "IDMA_FL_SEND_PCIEHDR",
6247 "IDMA_FL_PUSH_CPL_FIFO",
6249 "IDMA_FL_SEND_PAYLOAD_FIRST",
6250 "IDMA_FL_SEND_PAYLOAD",
6251 "IDMA_FL_REQ_NEXT_DATA_FL",
6252 "IDMA_FL_SEND_NEXT_PCIEHDR",
6253 "IDMA_FL_SEND_PADDING",
6254 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6256 static const char * const t6_decode[] = {
6258 "IDMA_PUSH_MORE_CPL_FIFO",
6259 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6260 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6261 "IDMA_PHYSADDR_SEND_PCIEHDR",
6262 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6263 "IDMA_PHYSADDR_SEND_PAYLOAD",
6264 "IDMA_FL_REQ_DATA_FL",
6266 "IDMA_FL_DROP_SEND_INC",
6267 "IDMA_FL_H_REQ_HEADER_FL",
6268 "IDMA_FL_H_SEND_PCIEHDR",
6269 "IDMA_FL_H_PUSH_CPL_FIFO",
6270 "IDMA_FL_H_SEND_CPL",
6271 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6272 "IDMA_FL_H_SEND_IP_HDR",
6273 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6274 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6275 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6276 "IDMA_FL_D_SEND_PCIEHDR",
6277 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6278 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6279 "IDMA_FL_SEND_PCIEHDR",
6280 "IDMA_FL_PUSH_CPL_FIFO",
6282 "IDMA_FL_SEND_PAYLOAD_FIRST",
6283 "IDMA_FL_SEND_PAYLOAD",
6284 "IDMA_FL_REQ_NEXT_DATA_FL",
6285 "IDMA_FL_SEND_NEXT_PCIEHDR",
6286 "IDMA_FL_SEND_PADDING",
6287 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6289 static const u32 sge_regs[] = {
6290 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6291 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6292 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6294 const char * const *sge_idma_decode;
6295 int sge_idma_decode_nstates;
6297 unsigned int chip_version = chip_id(adapter);
6299 /* Select the right set of decode strings to dump depending on the
6300 * adapter chip type.
6302 switch (chip_version) {
6304 sge_idma_decode = (const char * const *)t4_decode;
6305 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6309 sge_idma_decode = (const char * const *)t5_decode;
6310 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6314 sge_idma_decode = (const char * const *)t6_decode;
6315 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6319 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6323 if (state < sge_idma_decode_nstates)
6324 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6326 CH_WARN(adapter, "idma state %d unknown\n", state);
6328 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6329 CH_WARN(adapter, "SGE register %#x value %#x\n",
6330 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6334 * t4_sge_ctxt_flush - flush the SGE context cache
6335 * @adap: the adapter
6336 * @mbox: mailbox to use for the FW command
6338 * Issues a FW command through the given mailbox to flush the
6339 * SGE context cache.
6341 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6345 struct fw_ldst_cmd c;
6347 memset(&c, 0, sizeof(c));
6348 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6349 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6350 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6352 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6353 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6355 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6360 * t4_fw_hello - establish communication with FW
6361 * @adap: the adapter
6362 * @mbox: mailbox to use for the FW command
6363 * @evt_mbox: mailbox to receive async FW events
6364 * @master: specifies the caller's willingness to be the device master
6365 * @state: returns the current device state (if non-NULL)
6367 * Issues a command to establish communication with FW. Returns either
6368 * an error (negative integer) or the mailbox of the Master PF.
6370 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6371 enum dev_master master, enum dev_state *state)
6374 struct fw_hello_cmd c;
6376 unsigned int master_mbox;
6377 int retries = FW_CMD_HELLO_RETRIES;
6380 memset(&c, 0, sizeof(c));
6381 INIT_CMD(c, HELLO, WRITE);
6382 c.err_to_clearinit = cpu_to_be32(
6383 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6384 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6385 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6386 mbox : M_FW_HELLO_CMD_MBMASTER) |
6387 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6388 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6389 F_FW_HELLO_CMD_CLEARINIT);
6392 * Issue the HELLO command to the firmware. If it's not successful
6393 * but indicates that we got a "busy" or "timeout" condition, retry
6394 * the HELLO until we exhaust our retry limit. If we do exceed our
6395 * retry limit, check to see if the firmware left us any error
6396 * information and report that if so ...
6398 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6399 if (ret != FW_SUCCESS) {
6400 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6402 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6403 t4_report_fw_error(adap);
6407 v = be32_to_cpu(c.err_to_clearinit);
6408 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6410 if (v & F_FW_HELLO_CMD_ERR)
6411 *state = DEV_STATE_ERR;
6412 else if (v & F_FW_HELLO_CMD_INIT)
6413 *state = DEV_STATE_INIT;
6415 *state = DEV_STATE_UNINIT;
6419 * If we're not the Master PF then we need to wait around for the
6420 * Master PF Driver to finish setting up the adapter.
6422 * Note that we also do this wait if we're a non-Master-capable PF and
6423 * there is no current Master PF; a Master PF may show up momentarily
6424 * and we wouldn't want to fail pointlessly. (This can happen when an
6425 * OS loads lots of different drivers rapidly at the same time). In
6426 * this case, the Master PF returned by the firmware will be
6427 * M_PCIE_FW_MASTER so the test below will work ...
6429 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6430 master_mbox != mbox) {
6431 int waiting = FW_CMD_HELLO_TIMEOUT;
6434 * Wait for the firmware to either indicate an error or
6435 * initialized state. If we see either of these we bail out
6436 * and report the issue to the caller. If we exhaust the
6437 * "hello timeout" and we haven't exhausted our retries, try
6438 * again. Otherwise bail with a timeout error.
6447 * If neither Error nor Initialialized are indicated
6448 * by the firmware keep waiting till we exhaust our
6449 * timeout ... and then retry if we haven't exhausted
6452 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6453 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6464 * We either have an Error or Initialized condition
6465 * report errors preferentially.
6468 if (pcie_fw & F_PCIE_FW_ERR)
6469 *state = DEV_STATE_ERR;
6470 else if (pcie_fw & F_PCIE_FW_INIT)
6471 *state = DEV_STATE_INIT;
6475 * If we arrived before a Master PF was selected and
6476 * there's not a valid Master PF, grab its identity
6479 if (master_mbox == M_PCIE_FW_MASTER &&
6480 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6481 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6490 * t4_fw_bye - end communication with FW
6491 * @adap: the adapter
6492 * @mbox: mailbox to use for the FW command
6494 * Issues a command to terminate communication with FW.
6496 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6498 struct fw_bye_cmd c;
6500 memset(&c, 0, sizeof(c));
6501 INIT_CMD(c, BYE, WRITE);
6502 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6506 * t4_fw_reset - issue a reset to FW
6507 * @adap: the adapter
6508 * @mbox: mailbox to use for the FW command
6509 * @reset: specifies the type of reset to perform
6511 * Issues a reset command of the specified type to FW.
6513 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6515 struct fw_reset_cmd c;
6517 memset(&c, 0, sizeof(c));
6518 INIT_CMD(c, RESET, WRITE);
6519 c.val = cpu_to_be32(reset);
6520 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6524 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6525 * @adap: the adapter
6526 * @mbox: mailbox to use for the FW RESET command (if desired)
6527 * @force: force uP into RESET even if FW RESET command fails
6529 * Issues a RESET command to firmware (if desired) with a HALT indication
6530 * and then puts the microprocessor into RESET state. The RESET command
6531 * will only be issued if a legitimate mailbox is provided (mbox <=
6532 * M_PCIE_FW_MASTER).
6534 * This is generally used in order for the host to safely manipulate the
6535 * adapter without fear of conflicting with whatever the firmware might
6536 * be doing. The only way out of this state is to RESTART the firmware
6539 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6544 * If a legitimate mailbox is provided, issue a RESET command
6545 * with a HALT indication.
6547 if (mbox <= M_PCIE_FW_MASTER) {
6548 struct fw_reset_cmd c;
6550 memset(&c, 0, sizeof(c));
6551 INIT_CMD(c, RESET, WRITE);
6552 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6553 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6554 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6558 * Normally we won't complete the operation if the firmware RESET
6559 * command fails but if our caller insists we'll go ahead and put the
6560 * uP into RESET. This can be useful if the firmware is hung or even
6561 * missing ... We'll have to take the risk of putting the uP into
6562 * RESET without the cooperation of firmware in that case.
6564 * We also force the firmware's HALT flag to be on in case we bypassed
6565 * the firmware RESET command above or we're dealing with old firmware
6566 * which doesn't have the HALT capability. This will serve as a flag
6567 * for the incoming firmware to know that it's coming out of a HALT
6568 * rather than a RESET ... if it's new enough to understand that ...
6570 if (ret == 0 || force) {
6571 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6572 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6577 * And we always return the result of the firmware RESET command
6578 * even when we force the uP into RESET ...
6584 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6585 * @adap: the adapter
6586 * @reset: if we want to do a RESET to restart things
6588 * Restart firmware previously halted by t4_fw_halt(). On successful
6589 * return the previous PF Master remains as the new PF Master and there
6590 * is no need to issue a new HELLO command, etc.
6592 * We do this in two ways:
6594 * 1. If we're dealing with newer firmware we'll simply want to take
6595 * the chip's microprocessor out of RESET. This will cause the
6596 * firmware to start up from its start vector. And then we'll loop
6597 * until the firmware indicates it's started again (PCIE_FW.HALT
6598 * reset to 0) or we timeout.
6600 * 2. If we're dealing with older firmware then we'll need to RESET
6601 * the chip since older firmware won't recognize the PCIE_FW.HALT
6602 * flag and automatically RESET itself on startup.
6604 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6608 * Since we're directing the RESET instead of the firmware
6609 * doing it automatically, we need to clear the PCIE_FW.HALT
6612 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6615 * If we've been given a valid mailbox, first try to get the
6616 * firmware to do the RESET. If that works, great and we can
6617 * return success. Otherwise, if we haven't been given a
6618 * valid mailbox or the RESET command failed, fall back to
6619 * hitting the chip with a hammer.
6621 if (mbox <= M_PCIE_FW_MASTER) {
6622 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6624 if (t4_fw_reset(adap, mbox,
6625 F_PIORST | F_PIORSTMODE) == 0)
6629 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6634 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6635 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6636 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6647 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6648 * @adap: the adapter
6649 * @mbox: mailbox to use for the FW RESET command (if desired)
6650 * @fw_data: the firmware image to write
6652 * @force: force upgrade even if firmware doesn't cooperate
6654 * Perform all of the steps necessary for upgrading an adapter's
6655 * firmware image. Normally this requires the cooperation of the
6656 * existing firmware in order to halt all existing activities
6657 * but if an invalid mailbox token is passed in we skip that step
6658 * (though we'll still put the adapter microprocessor into RESET in
6661 * On successful return the new firmware will have been loaded and
6662 * the adapter will have been fully RESET losing all previous setup
6663 * state. On unsuccessful return the adapter may be completely hosed ...
6664 * positive errno indicates that the adapter is ~probably~ intact, a
6665 * negative errno indicates that things are looking bad ...
6667 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6668 const u8 *fw_data, unsigned int size, int force)
6670 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6671 unsigned int bootstrap =
6672 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6675 if (!t4_fw_matches_chip(adap, fw_hdr))
6679 ret = t4_fw_halt(adap, mbox, force);
6680 if (ret < 0 && !force)
6684 ret = t4_load_fw(adap, fw_data, size);
6685 if (ret < 0 || bootstrap)
6689 * Older versions of the firmware don't understand the new
6690 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6691 * restart. So for newly loaded older firmware we'll have to do the
6692 * RESET for it so it starts up on a clean slate. We can tell if
6693 * the newly loaded firmware will handle this right by checking
6694 * its header flags to see if it advertises the capability.
6696 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6697 return t4_fw_restart(adap, mbox, reset);
6701 * t4_fw_initialize - ask FW to initialize the device
6702 * @adap: the adapter
6703 * @mbox: mailbox to use for the FW command
6705 * Issues a command to FW to partially initialize the device. This
6706 * performs initialization that generally doesn't depend on user input.
6708 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6710 struct fw_initialize_cmd c;
6712 memset(&c, 0, sizeof(c));
6713 INIT_CMD(c, INITIALIZE, WRITE);
6714 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6718 * t4_query_params_rw - query FW or device parameters
6719 * @adap: the adapter
6720 * @mbox: mailbox to use for the FW command
6723 * @nparams: the number of parameters
6724 * @params: the parameter names
6725 * @val: the parameter values
6726 * @rw: Write and read flag
6728 * Reads the value of FW or device parameters. Up to 7 parameters can be
6731 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6732 unsigned int vf, unsigned int nparams, const u32 *params,
6736 struct fw_params_cmd c;
6737 __be32 *p = &c.param[0].mnem;
6742 memset(&c, 0, sizeof(c));
6743 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6744 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6745 V_FW_PARAMS_CMD_PFN(pf) |
6746 V_FW_PARAMS_CMD_VFN(vf));
6747 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6749 for (i = 0; i < nparams; i++) {
6750 *p++ = cpu_to_be32(*params++);
6752 *p = cpu_to_be32(*(val + i));
6756 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6758 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6759 *val++ = be32_to_cpu(*p);
6763 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6764 unsigned int vf, unsigned int nparams, const u32 *params,
6767 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6771 * t4_set_params_timeout - sets FW or device parameters
6772 * @adap: the adapter
6773 * @mbox: mailbox to use for the FW command
6776 * @nparams: the number of parameters
6777 * @params: the parameter names
6778 * @val: the parameter values
6779 * @timeout: the timeout time
6781 * Sets the value of FW or device parameters. Up to 7 parameters can be
6782 * specified at once.
6784 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6785 unsigned int pf, unsigned int vf,
6786 unsigned int nparams, const u32 *params,
6787 const u32 *val, int timeout)
6789 struct fw_params_cmd c;
6790 __be32 *p = &c.param[0].mnem;
6795 memset(&c, 0, sizeof(c));
6796 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6797 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6798 V_FW_PARAMS_CMD_PFN(pf) |
6799 V_FW_PARAMS_CMD_VFN(vf));
6800 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6803 *p++ = cpu_to_be32(*params++);
6804 *p++ = cpu_to_be32(*val++);
6807 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6811 * t4_set_params - sets FW or device parameters
6812 * @adap: the adapter
6813 * @mbox: mailbox to use for the FW command
6816 * @nparams: the number of parameters
6817 * @params: the parameter names
6818 * @val: the parameter values
6820 * Sets the value of FW or device parameters. Up to 7 parameters can be
6821 * specified at once.
6823 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6824 unsigned int vf, unsigned int nparams, const u32 *params,
6827 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6828 FW_CMD_MAX_TIMEOUT);
6832 * t4_cfg_pfvf - configure PF/VF resource limits
6833 * @adap: the adapter
6834 * @mbox: mailbox to use for the FW command
6835 * @pf: the PF being configured
6836 * @vf: the VF being configured
6837 * @txq: the max number of egress queues
6838 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6839 * @rxqi: the max number of interrupt-capable ingress queues
6840 * @rxq: the max number of interruptless ingress queues
6841 * @tc: the PCI traffic class
6842 * @vi: the max number of virtual interfaces
6843 * @cmask: the channel access rights mask for the PF/VF
6844 * @pmask: the port access rights mask for the PF/VF
6845 * @nexact: the maximum number of exact MPS filters
6846 * @rcaps: read capabilities
6847 * @wxcaps: write/execute capabilities
6849 * Configures resource limits and capabilities for a physical or virtual
6852 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6853 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6854 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6855 unsigned int vi, unsigned int cmask, unsigned int pmask,
6856 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6858 struct fw_pfvf_cmd c;
6860 memset(&c, 0, sizeof(c));
6861 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6862 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6863 V_FW_PFVF_CMD_VFN(vf));
6864 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6865 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6866 V_FW_PFVF_CMD_NIQ(rxq));
6867 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6868 V_FW_PFVF_CMD_PMASK(pmask) |
6869 V_FW_PFVF_CMD_NEQ(txq));
6870 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6871 V_FW_PFVF_CMD_NVI(vi) |
6872 V_FW_PFVF_CMD_NEXACTF(nexact));
6873 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6874 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6875 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6876 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6880 * t4_alloc_vi_func - allocate a virtual interface
6881 * @adap: the adapter
6882 * @mbox: mailbox to use for the FW command
6883 * @port: physical port associated with the VI
6884 * @pf: the PF owning the VI
6885 * @vf: the VF owning the VI
6886 * @nmac: number of MAC addresses needed (1 to 5)
6887 * @mac: the MAC addresses of the VI
6888 * @rss_size: size of RSS table slice associated with this VI
6889 * @portfunc: which Port Application Function MAC Address is desired
6890 * @idstype: Intrusion Detection Type
6892 * Allocates a virtual interface for the given physical port. If @mac is
6893 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6894 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6895 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6896 * stored consecutively so the space needed is @nmac * 6 bytes.
6897 * Returns a negative error number or the non-negative VI id.
6899 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6900 unsigned int port, unsigned int pf, unsigned int vf,
6901 unsigned int nmac, u8 *mac, u16 *rss_size,
6902 unsigned int portfunc, unsigned int idstype)
6907 memset(&c, 0, sizeof(c));
6908 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6909 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6910 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6911 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6912 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6913 V_FW_VI_CMD_FUNC(portfunc));
6914 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6917 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6919 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6924 memcpy(mac, c.mac, sizeof(c.mac));
6927 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6929 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6931 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6933 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6937 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6938 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6942 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6943 * @adap: the adapter
6944 * @mbox: mailbox to use for the FW command
6945 * @port: physical port associated with the VI
6946 * @pf: the PF owning the VI
6947 * @vf: the VF owning the VI
6948 * @nmac: number of MAC addresses needed (1 to 5)
6949 * @mac: the MAC addresses of the VI
6950 * @rss_size: size of RSS table slice associated with this VI
6952 * backwards compatible and convieniance routine to allocate a Virtual
6953 * Interface with a Ethernet Port Application Function and Intrustion
6954 * Detection System disabled.
6956 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6957 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6960 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6965 * t4_free_vi - free a virtual interface
6966 * @adap: the adapter
6967 * @mbox: mailbox to use for the FW command
6968 * @pf: the PF owning the VI
6969 * @vf: the VF owning the VI
6970 * @viid: virtual interface identifiler
6972 * Free a previously allocated virtual interface.
6974 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6975 unsigned int vf, unsigned int viid)
6979 memset(&c, 0, sizeof(c));
6980 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6983 V_FW_VI_CMD_PFN(pf) |
6984 V_FW_VI_CMD_VFN(vf));
6985 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6986 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6988 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6992 * t4_set_rxmode - set Rx properties of a virtual interface
6993 * @adap: the adapter
6994 * @mbox: mailbox to use for the FW command
6996 * @mtu: the new MTU or -1
6997 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6998 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6999 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7000 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7001 * @sleep_ok: if true we may sleep while awaiting command completion
7003 * Sets Rx properties of a virtual interface.
7005 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7006 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7009 struct fw_vi_rxmode_cmd c;
7011 /* convert to FW values */
7013 mtu = M_FW_VI_RXMODE_CMD_MTU;
7015 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7017 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7019 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7021 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7023 memset(&c, 0, sizeof(c));
7024 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7025 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7026 V_FW_VI_RXMODE_CMD_VIID(viid));
7027 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7029 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7030 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7031 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7032 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7033 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7034 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7038 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7039 * @adap: the adapter
7040 * @mbox: mailbox to use for the FW command
7042 * @free: if true any existing filters for this VI id are first removed
7043 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7044 * @addr: the MAC address(es)
7045 * @idx: where to store the index of each allocated filter
7046 * @hash: pointer to hash address filter bitmap
7047 * @sleep_ok: call is allowed to sleep
7049 * Allocates an exact-match filter for each of the supplied addresses and
7050 * sets it to the corresponding address. If @idx is not %NULL it should
7051 * have at least @naddr entries, each of which will be set to the index of
7052 * the filter allocated for the corresponding MAC address. If a filter
7053 * could not be allocated for an address its index is set to 0xffff.
7054 * If @hash is not %NULL addresses that fail to allocate an exact filter
7055 * are hashed and update the hash filter bitmap pointed at by @hash.
7057 * Returns a negative error number or the number of filters allocated.
7059 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7060 unsigned int viid, bool free, unsigned int naddr,
7061 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7063 int offset, ret = 0;
7064 struct fw_vi_mac_cmd c;
7065 unsigned int nfilters = 0;
7066 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7067 unsigned int rem = naddr;
7069 if (naddr > max_naddr)
7072 for (offset = 0; offset < naddr ; /**/) {
7073 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7075 : ARRAY_SIZE(c.u.exact));
7076 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7077 u.exact[fw_naddr]), 16);
7078 struct fw_vi_mac_exact *p;
7081 memset(&c, 0, sizeof(c));
7082 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7085 V_FW_CMD_EXEC(free) |
7086 V_FW_VI_MAC_CMD_VIID(viid));
7087 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7088 V_FW_CMD_LEN16(len16));
7090 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7092 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7093 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7094 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7098 * It's okay if we run out of space in our MAC address arena.
7099 * Some of the addresses we submit may get stored so we need
7100 * to run through the reply to see what the results were ...
7102 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7103 if (ret && ret != -FW_ENOMEM)
7106 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7107 u16 index = G_FW_VI_MAC_CMD_IDX(
7108 be16_to_cpu(p->valid_to_idx));
7111 idx[offset+i] = (index >= max_naddr
7114 if (index < max_naddr)
7117 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
7125 if (ret == 0 || ret == -FW_ENOMEM)
7131 * t4_change_mac - modifies the exact-match filter for a MAC address
7132 * @adap: the adapter
7133 * @mbox: mailbox to use for the FW command
7135 * @idx: index of existing filter for old value of MAC address, or -1
7136 * @addr: the new MAC address value
7137 * @persist: whether a new MAC allocation should be persistent
7138 * @add_smt: if true also add the address to the HW SMT
7140 * Modifies an exact-match filter and sets it to the new MAC address if
7141 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7142 * latter case the address is added persistently if @persist is %true.
7144 * Note that in general it is not possible to modify the value of a given
7145 * filter so the generic way to modify an address filter is to free the one
7146 * being used by the old address value and allocate a new filter for the
7147 * new address value.
7149 * Returns a negative error number or the index of the filter with the new
7150 * MAC value. Note that this index may differ from @idx.
7152 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7153 int idx, const u8 *addr, bool persist, bool add_smt)
7156 struct fw_vi_mac_cmd c;
7157 struct fw_vi_mac_exact *p = c.u.exact;
7158 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7160 if (idx < 0) /* new allocation */
7161 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7162 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7164 memset(&c, 0, sizeof(c));
7165 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7166 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7167 V_FW_VI_MAC_CMD_VIID(viid));
7168 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7169 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7170 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7171 V_FW_VI_MAC_CMD_IDX(idx));
7172 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7174 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7176 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7177 if (ret >= max_mac_addr)
7184 * t4_set_addr_hash - program the MAC inexact-match hash filter
7185 * @adap: the adapter
7186 * @mbox: mailbox to use for the FW command
7188 * @ucast: whether the hash filter should also match unicast addresses
7189 * @vec: the value to be written to the hash filter
7190 * @sleep_ok: call is allowed to sleep
7192 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7194 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7195 bool ucast, u64 vec, bool sleep_ok)
7197 struct fw_vi_mac_cmd c;
7200 memset(&c, 0, sizeof(c));
7201 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7202 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7203 V_FW_VI_ENABLE_CMD_VIID(viid));
7204 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7205 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7206 c.freemacs_to_len16 = cpu_to_be32(val);
7207 c.u.hash.hashvec = cpu_to_be64(vec);
7208 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7212 * t4_enable_vi_params - enable/disable a virtual interface
7213 * @adap: the adapter
7214 * @mbox: mailbox to use for the FW command
7216 * @rx_en: 1=enable Rx, 0=disable Rx
7217 * @tx_en: 1=enable Tx, 0=disable Tx
7218 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7220 * Enables/disables a virtual interface. Note that setting DCB Enable
7221 * only makes sense when enabling a Virtual Interface ...
7223 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7224 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7226 struct fw_vi_enable_cmd c;
7228 memset(&c, 0, sizeof(c));
7229 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7230 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7231 V_FW_VI_ENABLE_CMD_VIID(viid));
7232 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7233 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7234 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7236 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7240 * t4_enable_vi - enable/disable a virtual interface
7241 * @adap: the adapter
7242 * @mbox: mailbox to use for the FW command
7244 * @rx_en: 1=enable Rx, 0=disable Rx
7245 * @tx_en: 1=enable Tx, 0=disable Tx
7247 * Enables/disables a virtual interface. Note that setting DCB Enable
7248 * only makes sense when enabling a Virtual Interface ...
7250 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7251 bool rx_en, bool tx_en)
/* Thin wrapper: same as t4_enable_vi_params() with DCB delivery disabled. */
7253 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7257 * t4_identify_port - identify a VI's port by blinking its LED
7258 * @adap: the adapter
7259 * @mbox: mailbox to use for the FW command
7261 * @nblinks: how many times to blink LED at 2.5 Hz
7263 * Identifies a VI's port by blinking its LED.
7265 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7266 unsigned int nblinks)
7268 struct fw_vi_enable_cmd c;
7270 memset(&c, 0, sizeof(c));
7271 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7272 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7273 V_FW_VI_ENABLE_CMD_VIID(viid));
/* LED flag piggybacks on the VI enable command; blinkdur carries @nblinks. */
7274 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7275 c.blinkdur = cpu_to_be16(nblinks);
7276 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7280 * t4_iq_stop - stop an ingress queue and its FLs
7281 * @adap: the adapter
7282 * @mbox: mailbox to use for the FW command
7283 * @pf: the PF owning the queues
7284 * @vf: the VF owning the queues
7285 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7286 * @iqid: ingress queue id
7287 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7288 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7290 * Stops an ingress queue and its associated FLs, if any. This causes
7291 * any current or future data/messages destined for these queues to be
7294 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7295 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7296 unsigned int fl0id, unsigned int fl1id)
7300 memset(&c, 0, sizeof(c));
7301 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7302 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7303 V_FW_IQ_CMD_VFN(vf));
/* IQSTOP (not FREE): the queues stay allocated, delivery is halted. */
7304 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7305 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7306 c.iqid = cpu_to_be16(iqid);
7307 c.fl0id = cpu_to_be16(fl0id);
7308 c.fl1id = cpu_to_be16(fl1id);
7309 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7313 * t4_iq_free - free an ingress queue and its FLs
7314 * @adap: the adapter
7315 * @mbox: mailbox to use for the FW command
7316 * @pf: the PF owning the queues
7317 * @vf: the VF owning the queues
7318 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7319 * @iqid: ingress queue id
7320 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7321 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7323 * Frees an ingress queue and its associated FLs, if any.
7325 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7326 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7327 unsigned int fl0id, unsigned int fl1id)
7331 memset(&c, 0, sizeof(c));
7332 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7333 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7334 V_FW_IQ_CMD_VFN(vf));
/* Same command layout as t4_iq_stop() but with the FREE flag instead. */
7335 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c))
7336 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7337 c.iqid = cpu_to_be16(iqid);
7338 c.fl0id = cpu_to_be16(fl0id);
7339 c.fl1id = cpu_to_be16(fl1id);
7340 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7344 * t4_eth_eq_free - free an Ethernet egress queue
7345 * @adap: the adapter
7346 * @mbox: mailbox to use for the FW command
7347 * @pf: the PF owning the queue
7348 * @vf: the VF owning the queue
7349 * @eqid: egress queue id
7351 * Frees an Ethernet egress queue.
7353 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7354 unsigned int vf, unsigned int eqid)
7356 struct fw_eq_eth_cmd c;
/* FW_EQ_ETH_CMD with the FREE flag releases the queue back to firmware. */
7358 memset(&c, 0, sizeof(c));
7359 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7360 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7361 V_FW_EQ_ETH_CMD_PFN(pf) |
7362 V_FW_EQ_ETH_CMD_VFN(vf));
7363 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7364 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7365 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7369 * t4_ctrl_eq_free - free a control egress queue
7370 * @adap: the adapter
7371 * @mbox: mailbox to use for the FW command
7372 * @pf: the PF owning the queue
7373 * @vf: the VF owning the queue
7374 * @eqid: egress queue id
7376 * Frees a control egress queue.
7378 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7379 unsigned int vf, unsigned int eqid)
7381 struct fw_eq_ctrl_cmd c;
/* Mirrors t4_eth_eq_free() but for the control EQ command class. */
7383 memset(&c, 0, sizeof(c));
7384 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7385 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7386 V_FW_EQ_CTRL_CMD_PFN(pf) |
7387 V_FW_EQ_CTRL_CMD_VFN(vf));
7388 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7389 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7390 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7394 * t4_ofld_eq_free - free an offload egress queue
7395 * @adap: the adapter
7396 * @mbox: mailbox to use for the FW command
7397 * @pf: the PF owning the queue
7398 * @vf: the VF owning the queue
7399 * @eqid: egress queue id
7401 * Frees an offload egress queue.
7403 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7404 unsigned int vf, unsigned int eqid)
7406 struct fw_eq_ofld_cmd c;
/* Mirrors t4_eth_eq_free() but for the offload EQ command class. */
7408 memset(&c, 0, sizeof(c));
7409 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7410 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7411 V_FW_EQ_OFLD_CMD_PFN(pf) |
7412 V_FW_EQ_OFLD_CMD_VFN(vf));
7413 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7414 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7415 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7419 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7420 * @link_down_rc: Link Down Reason Code
7422 * Returns a string representation of the Link Down Reason Code.
7424 const char *t4_link_down_rc_str(unsigned char link_down_rc)
/* Table indexed directly by the firmware's Link Down Reason Code. */
7426 static const char *reason[] = {
7429 "Auto-negotiation Failure",
7431 "Insufficient Airflow",
7432 "Unable To Determine Reason",
7433 "No RX Signal Detected",
/* Out-of-range codes (e.g. from newer firmware) get a catch-all string. */
7437 if (link_down_rc >= ARRAY_SIZE(reason))
7438 return "Bad Reason Code";
7440 return reason[link_down_rc];
7444 * t4_handle_fw_rpl - process a FW reply message
7445 * @adap: the adapter
7446 * @rpl: start of the FW message
7448 * Processes a FW message, such as link state change messages.
7450 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7452 u8 opcode = *(const u8 *)rpl;
7453 const struct fw_port_cmd *p = (const void *)rpl;
7454 unsigned int action =
7455 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
/* Only FW_PORT_CMD/GET_PORT_INFO (link/module state change) is handled. */
7457 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7458 /* link/module state change message */
7459 int speed = 0, fc = 0, i;
7460 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7461 struct port_info *pi = NULL;
7462 struct link_config *lc;
7463 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7464 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7465 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* Accumulate pause (flow-control) flags from the status word. */
7467 if (stat & F_FW_PORT_CMD_RXPAUSE)
7469 if (stat & F_FW_PORT_CMD_TXPAUSE)
/* Decode the advertised link speed, highest match wins via else-if chain. */
7471 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7473 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7475 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7477 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7479 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7481 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
/* Locate the port_info whose Tx channel matches the message's port id. */
7484 for_each_port(adap, i) {
7485 pi = adap2pinfo(adap, i);
7486 if (pi->tx_chan == chan)
/* Notify the OS layer about module changes and link parameter changes. */
7491 if (mod != pi->mod_type) {
7493 t4_os_portmod_changed(adap, i);
7495 if (link_ok != lc->link_ok || speed != lc->speed ||
7496 fc != lc->fc) { /* something changed */
/* Capture the Link Down Reason Code only on an up->down transition. */
7499 if (!link_ok && lc->link_ok)
7500 reason = G_FW_PORT_CMD_LINKDNRC(stat);
7504 lc->link_ok = link_ok;
7507 lc->supported = be16_to_cpu(p->u.info.pcap);
7508 t4_os_link_changed(adap, i, link_ok, reason);
7511 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7518 * get_pci_mode - determine a card's PCI mode
7519 * @adapter: the adapter
7520 * @p: where to store the PCI settings
7522 * Determines a card's PCI mode and associated parameters, such as speed
7525 static void get_pci_mode(struct adapter *adapter,
7526 struct pci_params *p)
/* Read the PCIe Link Status register for negotiated speed and width. */
7531 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7533 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7534 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* Negotiated Link Width field starts at bit 4 of LNKSTA. */
7535 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7540 * init_link_config - initialize a link's SW state
7541 * @lc: structure holding the link state
7542 * @caps: link capabilities
7544 * Initializes the SW state maintained for each link, including the link's
7545 * capabilities and default speed/flow-control/autonegotiation settings.
7547 static void init_link_config(struct link_config *lc, unsigned int caps)
7549 lc->supported = caps;
7550 lc->requested_speed = 0;
/* Default to symmetric pause (Rx+Tx flow control). */
7552 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* Enable autonegotiation only if the port capabilities advertise it. */
7553 if (lc->supported & FW_PORT_CAP_ANEG) {
7554 lc->advertising = lc->supported & ADVERT_MASK;
7555 lc->autoneg = AUTONEG_ENABLE;
7556 lc->requested_fc |= PAUSE_AUTONEG;
7558 lc->advertising = 0;
7559 lc->autoneg = AUTONEG_DISABLE;
7564 u32 vendor_and_model_id;
7568 int t4_get_flash_params(struct adapter *adapter)
7571 * Table for non-Numonix supported flash parts. Numonix parts are left
7572 * to the preexisting well-tested code. All flash parts have 64KB
7575 static struct flash_desc supported_flash[] = {
7576 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
/* Issue the serial-flash READ ID command and fetch the 3-byte JEDEC id. */
7582 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7584 ret = sf1_read(adapter, 3, 0, 1, &info);
7585 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* First, check the table of explicitly supported non-Numonix parts. */
7589 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7590 if (supported_flash[ret].vendor_and_model_id == info) {
7591 adapter->params.sf_size = supported_flash[ret].size_mb;
7592 adapter->params.sf_nsec =
7593 adapter->params.sf_size / SF_SEC_SIZE;
/* Otherwise fall back to Numonix decoding (manufacturer id 0x20). */
7597 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7599 info >>= 16; /* log2 of size */
7600 if (info >= 0x14 && info < 0x18)
7601 adapter->params.sf_nsec = 1 << (info - 16);
7602 else if (info == 0x18)
7603 adapter->params.sf_nsec = 64;
7606 adapter->params.sf_size = 1 << info;
7609 * We should ~probably~ reject adapters with FLASHes which are too
7610 * small but we have some legacy FPGAs with small FLASHes that we'd
7611 * still like to use. So instead we emit a scary message ...
7613 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7614 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7615 adapter->params.sf_size, FLASH_MIN_SIZE);
/* Program the PCIe Completion Timeout field in Device Control 2. */
7620 static void set_pcie_completion_timeout(struct adapter *adapter,
7626 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Read-modify-write DEVCTL2 so other control bits are preserved. */
7628 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7631 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
/* Return the per-generation (T4/T5/T6) constant parameter table, or NULL. */
7635 const struct chip_params *t4_get_chip_params(int chipid)
7637 static const struct chip_params chip_params[] = {
7641 .pm_stats_cnt = PM_NSTATS,
7642 .cng_ch_bits_log = 2,
7644 .cim_num_obq = CIM_NUM_OBQ,
7645 .mps_rplc_size = 128,
7647 .sge_fl_db = F_DBPRIO,
7648 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7653 .pm_stats_cnt = PM_NSTATS,
7654 .cng_ch_bits_log = 2,
7656 .cim_num_obq = CIM_NUM_OBQ_T5,
7657 .mps_rplc_size = 128,
7659 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7660 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7665 .pm_stats_cnt = T6_PM_NSTATS,
7666 .cng_ch_bits_log = 3,
7668 .cim_num_obq = CIM_NUM_OBQ_T5,
7669 .mps_rplc_size = 256,
7672 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* Normalize chipid to a zero-based index into chip_params[]. */
7676 chipid -= CHELSIO_T4;
7677 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7680 return &chip_params[chipid];
7684 * t4_prep_adapter - prepare SW and HW for operation
7685 * @adapter: the adapter
7686 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7688 * Initialize adapter SW state for the various HW modules, set initial
7689 * values for some adapter tunables, take PHYs out of reset, and
7690 * initialize the MDIO interface.
7692 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7698 get_pci_mode(adapter, &adapter->params.pci);
/* PL_REV encodes both the chip generation (CHIPID) and its revision. */
7700 pl_rev = t4_read_reg(adapter, A_PL_REV);
7701 adapter->params.chipid = G_CHIPID(pl_rev);
7702 adapter->params.rev = G_REV(pl_rev);
7703 if (adapter->params.chipid == 0) {
7704 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7705 adapter->params.chipid = CHELSIO_T4;
7707 /* T4A1 chip is not supported */
7708 if (adapter->params.rev == 1) {
7709 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7714 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
7715 if (adapter->chip_params == NULL)
7718 adapter->params.pci.vpd_cap_addr =
7719 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7721 ret = t4_get_flash_params(adapter)
7725 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7729 /* Cards with real ASICs have the chipid in the PCIe device id */
7730 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7731 if (device_id >> 12 == chip_id(adapter))
7732 adapter->params.cim_la_size = CIMLA_SIZE;
/* Device id mismatch: FPGA emulation parts get a larger CIM LA buffer. */
7735 adapter->params.fpga = 1;
7736 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7739 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7742 * Default port and clock for debugging in case we can't reach FW.
7744 adapter->params.nports = 1;
7745 adapter->params.portvec = 1;
7746 adapter->params.vpd.cclk = 50000;
7748 /* Set pci completion timeout value to 4 seconds. */
7749 set_pcie_completion_timeout(adapter, 0xd);
7754 * t4_shutdown_adapter - shut down adapter, host & wire
7755 * @adapter: the adapter
7757 * Perform an emergency shutdown of the adapter and stop it from
7758 * continuing any further communication on the ports or DMA to the
7759 * host. This is typically used when the adapter and/or firmware
7760 * have crashed and we want to prevent any further accidental
7761 * communication with the rest of the world. This will also force
7762 * the port Link Status to go down -- if register writes work --
7763 * which should help our peers figure out that we're down.
7765 int t4_shutdown_adapter(struct adapter *adapter)
7769 t4_intr_disable(adapter);
7770 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
/* Clear SIGNAL_DET on every port to force link status down on the wire. */
7771 for_each_port(adapter, port) {
7772 u32 a_port_cfg = PORT_REG(port,
7777 t4_write_reg(adapter, a_port_cfg,
7778 t4_read_reg(adapter, a_port_cfg)
7779 & ~V_SIGNAL_DET(1));
/* Finally stop all SGE activity (and thus DMA to the host). */
7781 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7787 * t4_init_devlog_params - initialize adapter->params.devlog
7788 * @adap: the adapter
7789 * @fw_attach: whether we can talk to the firmware
7791 * Initialize various fields of the adapter's Firmware Device Log
7792 * Parameters structure.
7794 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7796 struct devlog_params *dparams = &adap->params.devlog;
7798 unsigned int devlog_meminfo;
7799 struct fw_devlog_cmd devlog_cmd;
7802 /* If we're dealing with newer firmware, the Device Log Parameters
7803 * are stored in a designated register which allows us to access the
7804 * Device Log even if we can't talk to the firmware.
7807 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7809 unsigned int nentries, nentries128;
7811 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
/* ADDR16 is in 16-byte units, hence the << 4 to get a byte address. */
7812 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
/* NENTRIES128 stores (number of entries / 128) - 1. */
7814 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7815 nentries = (nentries128 + 1) * 128;
7816 dparams->size = nentries * sizeof(struct fw_devlog_e);
7822 * For any failing returns ...
7824 memset(dparams, 0, sizeof *dparams);
7827 * If we can't talk to the firmware, there's really nothing we can do
7833 /* Otherwise, ask the firmware for its Device Log Parameters.
7835 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7836 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7837 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7838 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7839 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7845 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7846 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7847 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7848 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7854 * t4_init_sge_params - initialize adap->params.sge
7855 * @adapter: the adapter
7857 * Initialize various fields of the adapter's SGE Parameters structure.
7859 int t4_init_sge_params(struct adapter *adapter)
7862 struct sge_params *sp = &adapter->params.sge;
/* Interrupt-coalescing packet-count thresholds. */
7865 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7866 sp->counter_val[0] = G_THRESHOLD_0(r);
7867 sp->counter_val[1] = G_THRESHOLD_1(r);
7868 sp->counter_val[2] = G_THRESHOLD_2(r);
7869 sp->counter_val[3] = G_THRESHOLD_3(r);
/* Holdoff timers, converted from core-clock ticks to microseconds. */
7871 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7872 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7873 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7874 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7875 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7876 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7877 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7878 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7879 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
/* Free-list starvation thresholds; the packing variant differs per chip. */
7881 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7882 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7884 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7885 else if (is_t5(adapter))
7886 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7888 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
7890 /* egress queues: log2 of # of doorbells per BAR2 page */
7891 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7892 r >>= S_QUEUESPERPAGEPF0 +
7893 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7894 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7896 /* ingress queues: log2 of # of doorbells per BAR2 page */
7897 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7898 r >>= S_QUEUESPERPAGEPF0 +
7899 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7900 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
/* Host page size for this PF; field value is log2(page size) - 10. */
7902 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7903 r >>= S_HOSTPAGESIZEPF0 +
7904 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7905 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7907 r = t4_read_reg(adapter, A_SGE_CONTROL);
7908 sp->sge_control = r;
7909 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7910 sp->fl_pktshift = G_PKTSHIFT(r);
/* The pad boundary shift base differs between T4/T5 and T6. */
7911 if (chip_id(adapter) <= CHELSIO_T5) {
7912 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
7913 X_INGPADBOUNDARY_SHIFT);
7915 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
7916 X_T6_INGPADBOUNDARY_SHIFT);
7919 sp->pack_boundary = sp->pad_boundary;
7921 r = t4_read_reg(adapter, A_SGE_CONTROL2);
/* INGPACKBOUNDARY == 0 is a special encoding meaning 16 bytes. */
7922 if (G_INGPACKBOUNDARY(r) == 0)
7923 sp->pack_boundary = 16;
7925 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
/* Cache the hardware free-list buffer sizes. */
7927 for (i = 0; i < SGE_FLBUF_SIZES; i++)
7928 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
7929 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
7935 * Read and cache the adapter's compressed filter mode and ingress config.
7937 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7939 struct tp_params *tpp = &adap->params.tp;
/* Prefer firmware LDST access when available, else direct TP PIO reads. */
7941 if (t4_use_ldst(adap)) {
7942 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7943 A_TP_VLAN_PRI_MAP, 1);
7944 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7945 A_TP_INGRESS_CONFIG, 1);
7947 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7948 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7949 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7950 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7954 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7955 * shift positions of several elements of the Compressed Filter Tuple
7956 * for this adapter which we need frequently ...
7958 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
7959 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
7960 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
7961 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
7962 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
7963 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
7964 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
7965 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
7966 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
7967 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
7970 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7971 * represents the presence of an Outer VLAN instead of a VNIC ID.
7973 if ((tpp->ingress_config & F_VNIC) == 0)
7974 tpp->vnic_shift = -1;
7978 * t4_init_tp_params - initialize adap->params.tp
7979 * @adap: the adapter
7981 * Initialize various fields of the adapter's TP Parameters structure.
7983 int t4_init_tp_params(struct adapter *adap)
7987 struct tp_params *tpp = &adap->params.tp;
/* Cache the TP timer and delayed-ACK resolutions. */
7989 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7990 tpp->tre = G_TIMERRESOLUTION(v);
7991 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7993 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7994 for (chan = 0; chan < MAX_NCHAN; chan++)
7995 tpp->tx_modq[chan] = chan;
7997 read_filter_mode_and_ingress_config(adap);
8000 * For T6, cache the adapter's compressed error vector
8001 * and passing outer header info for encapsulated packets.
8003 if (chip_id(adap) > CHELSIO_T5) {
8004 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
8005 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
8012 * t4_filter_field_shift - calculate filter field shift
8013 * @adap: the adapter
8014 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8016 * Return the shift position of a filter field within the Compressed
8017 * Filter Tuple. The filter field is specified via its selection bit
8018 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
8020 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8022 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field not present in the filter mode has no shift position. */
8026 if ((filter_mode & filter_sel) == 0)
/* Sum the widths of all enabled fields below the requested one. */
8029 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8030 switch (filter_mode & sel) {
8032 field_shift += W_FT_FCOE;
8035 field_shift += W_FT_PORT;
8038 field_shift += W_FT_VNIC_ID;
8041 field_shift += W_FT_VLAN;
8044 field_shift += W_FT_TOS;
8047 field_shift += W_FT_PROTOCOL;
8050 field_shift += W_FT_ETHERTYPE;
8053 field_shift += W_FT_MACMATCH;
8056 field_shift += W_FT_MPSHITTYPE;
8058 case F_FRAGMENTATION:
8059 field_shift += W_FT_FRAGMENTATION;
/* Initialize one port: query FW port info, allocate the port's first VI,
 * and fetch its RSS parameters.  Called once per port at attach time. */
8066 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
8070 struct fw_port_cmd c;
8072 struct port_info *p = adap2pinfo(adap, port_id);
8075 memset(&c, 0, sizeof(c));
/* Map the logical port_id to the j'th bit set in the FW port vector. */
8077 for (i = 0, j = -1; i <= p->port_id; i++) {
8080 } while ((adap->params.portvec & (1 << j)) == 0);
/* VFs may lack the PORT capability; skip the port query in that case. */
8083 if (!(adap->flags & IS_VF) ||
8084 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
8085 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
8086 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8087 V_FW_PORT_CMD_PORTID(j));
8088 c.action_to_len16 = htonl(
8089 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8091 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8095 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
8096 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
8097 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
8098 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
8099 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
8101 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
8104 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
8108 p->vi[0].viid = ret;
/* SMT index derivation from the VIID changed between T5 and T6. */
8109 if (chip_id(adap) <= CHELSIO_T5)
8110 p->vi[0].smt_idx = (ret & 0x7f) << 1;
8112 p->vi[0].smt_idx = (ret & 0x7f);
8114 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
8116 p->vi[0].rss_size = rss_size;
8117 t4_os_set_hw_addr(adap, p->port_id, addr);
/* Ask FW for the VI's RSS table base; 0xffff marks "unavailable". */
8119 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8120 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
8121 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
8122 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
8124 p->vi[0].rss_base = 0xffff;
8126 /* MPASS((val >> 16) == rss_size); */
8127 p->vi[0].rss_base = val & 0xffff;
8134 * t4_read_cimq_cfg - read CIM queue configuration
8135 * @adap: the adapter
8136 * @base: holds the queue base addresses in bytes
8137 * @size: holds the queue sizes in bytes
8138 * @thres: holds the queue full thresholds in bytes
8140 * Returns the current configuration of the CIM queues, starting with
8141 * the IBQs, then the OBQs.
8143 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8146 int cim_num_obq = adap->chip_params->cim_num_obq;
/* Inbound queues first: select each IBQ, then read its config word. */
8148 for (i = 0; i < CIM_NUM_IBQ; i++) {
8149 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8151 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8152 /* value is in 256-byte units */
8153 *base++ = G_CIMQBASE(v) * 256;
8154 *size++ = G_CIMQSIZE(v) * 256;
8155 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
/* Then the outbound queues (count varies by chip generation). */
8157 for (i = 0; i < cim_num_obq; i++) {
8158 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8160 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8161 /* value is in 256-byte units */
8162 *base++ = G_CIMQBASE(v) * 256;
8163 *size++ = G_CIMQSIZE(v) * 256;
8168 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8169 * @adap: the adapter
8170 * @qid: the queue index
8171 * @data: where to store the queue contents
8172 * @n: capacity of @data in 32-bit words
8174 * Reads the contents of the selected CIM queue starting at address 0 up
8175 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8176 * error and the number of 32-bit words actually read on success.
8178 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8180 int i, err, attempts;
8182 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* There are 6 IBQs (0-5); n must be a multiple of 4 words. */
8184 if (qid > 5 || (n & 3))
8187 addr = qid * nwords;
8191 /* It might take 3-10ms before the IBQ debug read access is allowed.
8192 * Wait for 1 Sec with a delay of 1 usec.
/* One debug-read handshake per word: set address+enable, wait, read data. */
8196 for (i = 0; i < n; i++, addr++) {
8197 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8199 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8203 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable debug access when done. */
8205 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8210 * t4_read_cim_obq - read the contents of a CIM outbound queue
8211 * @adap: the adapter
8212 * @qid: the queue index
8213 * @data: where to store the queue contents
8214 * @n: capacity of @data in 32-bit words
8216 * Reads the contents of the selected CIM queue starting at address 0 up
8217 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8218 * error and the number of 32-bit words actually read on success.
8220 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8223 unsigned int addr, v, nwords;
8224 int cim_num_obq = adap->chip_params->cim_num_obq;
8226 if ((qid > (cim_num_obq - 1)) || (n & 3))
/* Look up this OBQ's base/size from the CIM queue configuration. */
8229 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8230 V_QUENUMSELECT(qid));
8231 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8233 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
8234 nwords = G_CIMQSIZE(v) * 64; /* same */
/* One debug-read handshake per word, as in t4_read_cim_ibq(). */
8238 for (i = 0; i < n; i++, addr++) {
8239 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8241 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8245 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
8247 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Base offsets of the regions within the CIM internal address space. */
8253 CIM_CTL_BASE = 0x2000,
8254 CIM_PBT_ADDR_BASE = 0x2800,
8255 CIM_PBT_LRF_BASE = 0x3000,
8256 CIM_PBT_DATA_BASE = 0x3800
8260 * t4_cim_read - read a block from CIM internal address space
8261 * @adap: the adapter
8262 * @addr: the start address within the CIM address space
8263 * @n: number of words to read
8264 * @valp: where to store the result
8266 * Reads a block of 4-byte words from the CIM internal address space.
8268 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if a previous host access is still in flight. */
8273 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* One word at a time: write the address, wait for !HOSTBUSY, read data. */
8276 for ( ; !ret && n--; addr += 4) {
8277 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8278 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8281 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8287 * t4_cim_write - write a block into CIM internal address space
8288 * @adap: the adapter
8289 * @addr: the start address within the CIM address space
8290 * @n: number of words to write
8291 * @valp: set of values to write
8293 * Writes a block of 4-byte words into the CIM internal address space.
8295 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8296 const unsigned int *valp)
/* Bail out if a previous host access is still in flight. */
8300 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* Data is staged first, then the address+HOSTWRITE kicks off the write. */
8303 for ( ; !ret && n--; addr += 4) {
8304 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8305 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8306 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Single-word convenience wrapper around t4_cim_write(). */
8312 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8315 return t4_cim_write(adap, addr, 1, &val);
8319 * t4_cim_ctl_read - read a block from CIM control region
8320 * @adap: the adapter
8321 * @addr: the start address within the CIM control region
8322 * @n: number of words to read
8323 * @valp: where to store the result
8325 * Reads a block of 4-byte words from the CIM control region.
8327 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* The control region lives at CIM_CTL_BASE within the CIM address space. */
8330 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8334 * t4_cim_read_la - read CIM LA capture buffer
8335 * @adap: the adapter
8336 * @la_buf: where to store the LA data
8337 * @wrptr: the HW write pointer within the capture buffer
8339 * Reads the contents of the CIM LA buffer with the most recent entry at
8340 * the end of the returned data and with the entry at @wrptr first.
8341 * We try to leave the LA in the running state we find it in.
8343 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8346 unsigned int cfg, val, idx;
8348 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8352 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8353 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
/* Start reading from the current HW write pointer. */
8358 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8362 idx = G_UPDBGLAWRPTR(val);
8366 for (i = 0; i < adap->params.cim_la_size; i++) {
8367 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8368 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8371 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* RDEN still set means the read did not complete. */
8374 if (val & F_UPDBGLARDEN) {
8378 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8382 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8383 idx = (idx + 1) & M_UPDBGLARDPTR;
8385 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8386 * identify the 32-bit portion of the full 312-bit data
8389 while ((idx & 0xf) > 9)
8390 idx = (idx + 1) % M_UPDBGLARDPTR;
/* Restore the running state if we froze the LA above. */
8393 if (cfg & F_UPDBGLAEN) {
8394 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8395 cfg & ~F_UPDBGLARDEN);
8403 * t4_tp_read_la - read TP LA capture buffer
8404 * @adap: the adapter
8405 * @la_buf: where to store the LA data
8406 * @wrptr: the HW write pointer within the capture buffer
8408 * Reads the contents of the TP LA buffer with the most recent entry at
8409 * the end of the returned data and with the entry at @wrptr first.
8410 * We leave the LA in the running state we find it in.
8412 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8414 bool last_incomplete;
8415 unsigned int i, cfg, val, idx;
8417 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8418 if (cfg & F_DBGLAENABLE) /* freeze LA */
8419 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8420 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE))
8422 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8423 idx = G_DBGLAWPTR(val);
/* In modes >= 2 a half-written final entry must be skipped over. */
8424 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8425 if (last_incomplete)
8426 idx = (idx + 1) & M_DBGLARPTR;
/* Preserve everything but the read pointer we are about to drive. */
8431 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8432 val |= adap->params.tp.la_mask;
8434 for (i = 0; i < TPLA_SIZE; i++) {
8435 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8436 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8437 idx = (idx + 1) & M_DBGLARPTR;
8440 /* Wipe out last entry if it isn't valid */
8441 if (last_incomplete)
8442 la_buf[TPLA_SIZE - 1] = ~0ULL;
8444 if (cfg & F_DBGLAENABLE) /* restore running state */
8445 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8446 cfg | adap->params.tp.la_mask);
8450 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8451 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8452 * state for more than the Warning Threshold then we'll issue a warning about
8453 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8454 * appears to be hung every Warning Repeat second till the situation clears.
8455 * If the situation clears, we'll note that as well.
8457 #define SGE_IDMA_WARN_THRESH 1
8458 #define SGE_IDMA_WARN_REPEAT 300
8461 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8462 * @adapter: the adapter
8463 * @idma: the adapter IDMA Monitor state
8465 * Initialize the state of an SGE Ingress DMA Monitor.
8467 void t4_idma_monitor_init(struct adapter *adapter,
8468 struct sge_idma_monitor_state *idma)
8470 /* Initialize the state variables for detecting an SGE Ingress DMA
8471 * hang. The SGE has internal counters which count up on each clock
8472 * tick whenever the SGE finds its Ingress DMA State Engines in the
8473 * same state they were on the previous clock tick. The clock used is
8474 * the Core Clock so we have a limit on the maximum "time" they can
8475 * record; typically a very small number of seconds. For instance,
8476 * with a 600MHz Core Clock, we can only count up to a bit more than
8477 * 7s. So we'll synthesize a larger counter in order to not run the
8478 * risk of having the "timers" overflow and give us the flexibility to
8479 * maintain a Hung SGE State Machine of our own which operates across
8480 * a longer time frame.
/* One second expressed in Core Clock ticks; both channel stall counters 0. */
8482 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8483 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8487 * t4_idma_monitor - monitor SGE Ingress DMA state
8488 * @adapter: the adapter
8489 * @idma: the adapter IDMA Monitor state
8490 * @hz: number of ticks/second
8491 * @ticks: number of ticks since the last IDMA Monitor call
8493 void t4_idma_monitor(struct adapter *adapter,
8494 struct sge_idma_monitor_state *idma,
8497 int i, idma_same_state_cnt[2];
8499 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8500 * are counters inside the SGE which count up on each clock when the
8501 * SGE finds its Ingress DMA State Engines in the same states they
8502 * were in the previous clock. The counters will peg out at
8503 * 0xffffffff without wrapping around so once they pass the 1s
8504 * threshold they'll stay above that till the IDMA state changes.
/* Debug index 13 exposes the two per-channel same-state counters. */
8506 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8507 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8508 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* Evaluate each of the two IDMA channels independently. */
8510 for (i = 0; i < 2; i++) {
8511 u32 debug0, debug11;
8513 /* If the Ingress DMA Same State Counter ("timer") is less
8514 * than 1s, then we can reset our synthesized Stall Timer and
8515 * continue. If we have previously emitted warnings about a
8516 * potential stalled Ingress Queue, issue a note indicating
8517 * that the Ingress Queue has resumed forward progress.
8519 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8520 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8521 CH_WARN(adapter, "SGE idma%d, queue %u, "
8522 "resumed after %d seconds\n",
8523 i, idma->idma_qid[i],
8524 idma->idma_stalled[i]/hz);
8525 idma->idma_stalled[i] = 0;
8529 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8530 * domain. The first time we get here it'll be because we
8531 * passed the 1s Threshold; each additional time it'll be
8532 * because the RX Timer Callback is being fired on its regular
8535 * If the stall is below our Potential Hung Ingress Queue
8536 * Warning Threshold, continue.
/* First crossing of the 1s threshold seeds the synthesized timer at hz. */
8538 if (idma->idma_stalled[i] == 0) {
8539 idma->idma_stalled[i] = hz;
8540 idma->idma_warn[i] = 0;
8542 idma->idma_stalled[i] += ticks;
8543 idma->idma_warn[i] -= ticks;
8546 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8549 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8551 if (idma->idma_warn[i] > 0)
8553 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8555 /* Read and save the SGE IDMA State and Queue ID information.
8556 * We do this every time in case it changes across time ...
8557 * can't be too careful ...
/* Debug index 0: 6-bit IDMA state per channel at 9-bit stride. */
8559 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8560 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8561 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
/* Debug index 11: 16-bit queue ID per channel. */
8563 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8564 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8565 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
/*
 * NOTE(review): the concatenated format string yields a double space
 * ("stuck in  state") — cosmetic; left as-is in this annotation pass.
 */
8567 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8568 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8569 i, idma->idma_qid[i], idma->idma_state[i],
8570 idma->idma_stalled[i]/hz,
8572 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8577 * t4_read_pace_tbl - read the pace table
8578 * @adap: the adapter
8579 * @pace_vals: holds the returned values
8581 * Returns the values of TP's pace table in microseconds.
8583 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
/* Select entry i via the high half of A_TP_PACE_TABLE, then read it back. */
8587 for (i = 0; i < NTX_SCHED; i++) {
8588 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8589 v = t4_read_reg(adap, A_TP_PACE_TABLE);
/* Convert from delayed-ACK ticks to microseconds for the caller. */
8590 pace_vals[i] = dack_ticks_to_usec(adap, v);
8595 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8596 * @adap: the adapter
8597 * @sched: the scheduler index
8598 * @kbps: the byte rate in Kbps
8599 * @ipg: the interpacket delay in tenths of nanoseconds
8601 * Return the current configuration of a HW Tx scheduler.
8603 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8606 unsigned int v, addr, bpt, cpt;
/* Two schedulers share one rate-limit register; sched/2 picks the word. */
8609 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8610 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8611 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
/* bpt = bytes per tick (bits 8..15 of the selected half). */
8614 bpt = (v >> 8) & 0xff;
8617 *kbps = 0; /* scheduler disabled */
/* ticks/s = core clock (kHz * 1000) / clocks-per-tick; then bytes/s -> Kbps. */
8619 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8620 *kbps = (v * bpt) / 125;
/* Second TM PIO read: the timer separator gives the inter-packet gap. */
8624 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8625 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8626 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
/* Convert core-clock ticks to tenths of nanoseconds. */
8630 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8635 * t4_load_cfg - download config file
8636 * @adap: the adapter
8637 * @cfg_data: the cfg text file to write
8638 * @size: text file size
8640 * Write the supplied config text file to the card's serial flash.
8642 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8644 int ret, i, n, cfg_addr;
8646 unsigned int flash_cfg_start_sec;
8647 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/* Locate the FLASH region reserved for the Firmware Configuration File. */
8649 cfg_addr = t4_flash_cfg_addr(adap);
8654 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8656 if (size > FLASH_CFG_MAX_SIZE) {
8657 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8658 FLASH_CFG_MAX_SIZE);
/* Erase every sector the (maximum-size) config region spans. */
8662 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8664 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8665 flash_cfg_start_sec + i - 1);
8667 * If size == 0 then we're simply erasing the FLASH sectors associated
8668 * with the on-adapter Firmware Configuration File.
8670 if (ret || size == 0)
8673 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8674 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8675 if ( (size - i) < SF_PAGE_SIZE)
8679 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8683 addr += SF_PAGE_SIZE;
8684 cfg_data += SF_PAGE_SIZE;
/* Same message serves both the erase-only and the download path. */
8689 CH_ERR(adap, "config file %s failed %d\n",
8690 (size == 0 ? "clear" : "download"), ret);
8695 * t5_fw_init_extern_mem - initialize the external memory
8696 * @adap: the adapter
8698 * Initializes the external memory on T5.
8700 int t5_fw_init_extern_mem(struct adapter *adap)
8702 u32 params[1], val[1];
/* Ask FW (DEV/MCINIT param) to initialize all memory controllers. */
8708 val[0] = 0xff; /* Initialize all MCs */
8709 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8710 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
/* MC init can be slow, so use the maximum FW command timeout. */
8711 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8712 FW_CMD_MAX_TIMEOUT);
8717 /* BIOS boot headers */
/* Standard PCI Expansion ROM header: fields are byte arrays so the layout
 * matches the on-ROM little-endian image regardless of host endianness. */
8718 typedef struct pci_expansion_rom_header {
8719 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8720 u8 reserved[22]; /* Reserved per processor Architecture data */
8721 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8722 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8724 /* Legacy PCI Expansion ROM Header */
8725 typedef struct legacy_pci_expansion_rom_header {
8726 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8727 u8 size512; /* Current Image Size in units of 512 bytes */
8728 u8 initentry_point[4];
8729 u8 cksum; /* Checksum computed on the entire Image */
8730 u8 reserved[16]; /* Reserved */
8731 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
8732 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8734 /* EFI PCI Expansion ROM Header */
8735 typedef struct efi_pci_expansion_rom_header {
8736 u8 signature[2]; // ROM signature. The value 0xaa55
8737 u8 initialization_size[2]; /* Units 512. Includes this header */
8738 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8739 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
8740 u8 efi_machine_type[2]; /* Machine type from EFI image header */
8741 u8 compression_type[2]; /* Compression type. */
8743 * Compression type definition
8746 * 0x2-0xFFFF: Reserved
8748 u8 reserved[8]; /* Reserved */
8749 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
8750 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8751 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8753 /* PCI Data Structure Format */
8754 typedef struct pcir_data_structure { /* PCI Data Structure */
8755 u8 signature[4]; /* Signature. The string "PCIR" */
8756 u8 vendor_id[2]; /* Vendor Identification */
8757 u8 device_id[2]; /* Device Identification */
8758 u8 vital_product[2]; /* Pointer to Vital Product Data */
8759 u8 length[2]; /* PCIR Data Structure Length */
8760 u8 revision; /* PCIR Data Structure Revision */
8761 u8 class_code[3]; /* Class Code */
8762 u8 image_length[2]; /* Image Length. Multiple of 512B */
8763 u8 code_revision[2]; /* Revision Level of Code/Data */
8764 u8 code_type; /* Code Type. */
8766 * PCI Expansion ROM Code Types
8767 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8768 * 0x01: Open Firmware standard for PCI. FCODE
8769 * 0x02: Hewlett-Packard PA RISC. HP reserved
8770 * 0x03: EFI Image. EFI
8771 * 0x04-0xFF: Reserved.
8773 u8 indicator; /* Indicator. Identifies the last image in the ROM */
8774 u8 reserved[2]; /* Reserved */
8775 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8777 /* BOOT constants */
/* Limits and signatures used when validating/writing option-ROM images. */
8779 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8780 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8781 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8782 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8783 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8784 VENDOR_ID = 0x1425, /* Vendor ID */
8785 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8789 * modify_device_id - Modifies the device ID of the Boot BIOS image
8790 * @adatper: the device ID to write.
8791 * @boot_data: the boot image to modify.
8793 * Write the supplied device ID to the boot BIOS image.
8795 static void modify_device_id(int device_id, u8 *boot_data)
8797 legacy_pci_exp_rom_header_t *header;
8798 pcir_data_t *pcir_header;
8802 * Loop through all chained images and change the device ID's
/* Each image: legacy ROM header, then its PCIR structure at pcir_offset. */
8805 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8806 pcir_header = (pcir_data_t *) &boot_data[cur_header +
8807 le16_to_cpu(*(u16*)header->pcir_offset)];
8810 * Only modify the Device ID if code type is Legacy or HP.
8811 * 0x00: Okay to modify
8812 * 0x01: FCODE. Do not be modify
8813 * 0x03: Okay to modify
8814 * 0x04-0xFF: Do not modify
8816 if (pcir_header->code_type == 0x00) {
8821 * Modify Device ID to match current adatper
8823 *(u16*) pcir_header->device_id = device_id;
8826 * Set checksum temporarily to 0.
8827 * We will recalculate it later.
8829 header->cksum = 0x0;
8832 * Calculate and update checksum
/* Sum the whole image (size512 * 512 bytes) with cksum zeroed. */
8834 for (i = 0; i < (header->size512 * 512); i++)
8835 csum += (u8)boot_data[cur_header + i];
8838 * Invert summed value to create the checksum
8839 * Writing new checksum value directly to the boot data
/* Offset 7 is the cksum field of the legacy header within this image. */
8841 boot_data[cur_header + 7] = -csum;
8843 } else if (pcir_header->code_type == 0x03) {
8846 * Modify Device ID to match current adatper
/* EFI images carry no legacy checksum, so only the ID is rewritten. */
8848 *(u16*) pcir_header->device_id = device_id;
8854 * Check indicator element to identify if this is the last
/* Bit 7 of the PCIR indicator marks the final image in the ROM chain. */
8857 if (pcir_header->indicator & 0x80)
8861 * Move header pointer up to the next image in the ROM.
8863 cur_header += header->size512 * 512;
8868 * t4_load_boot - download boot flash
8869 * @adapter: the adapter
8870 * @boot_data: the boot image to write
8871 * @boot_addr: offset in flash to write boot_data
8874 * Write the supplied boot image to the card's serial flash.
8875 * The boot image has the following sections: a 28-byte header and the
8878 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8879 unsigned int boot_addr, unsigned int size)
8881 pci_exp_rom_header_t *header;
8883 pcir_data_t *pcir_header;
/* boot_addr is given in KB units; convert to a byte offset in flash. */
8887 unsigned int boot_sector = (boot_addr * 1024 );
8888 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8891 * Make sure the boot image does not encroach on the firmware region
8893 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8894 CH_ERR(adap, "boot image encroaching on firmware region\n");
8899 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8900 * and Boot configuration data sections. These 3 boot sections span
8901 * sectors 0 to 7 in flash and live right before the FW image location.
8903 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8905 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8906 (boot_sector >> 16) + i - 1);
8909 * If size == 0 then we're simply erasing the FLASH sectors associated
8910 * with the on-adapter option ROM file
8912 if (ret || (size == 0))
8915 /* Get boot header */
8916 header = (pci_exp_rom_header_t *)boot_data;
8917 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8918 /* PCIR Data Structure */
8919 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8922 * Perform some primitive sanity testing to avoid accidentally
8923 * writing garbage over the boot sectors. We ought to check for
8924 * more but it's not worth it for now ...
8926 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8927 CH_ERR(adap, "boot image too small/large\n");
/* Signature checks are skipped in diagnostics builds. */
8931 #ifndef CHELSIO_T4_DIAGS
8933 * Check BOOT ROM header signature
8935 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8936 CH_ERR(adap, "Boot image missing signature\n");
8941 * Check PCI header signature
8943 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8944 CH_ERR(adap, "PCI header missing signature\n");
8949 * Check Vendor ID matches Chelsio ID
8951 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8952 CH_ERR(adap, "Vendor ID missing signature\n");
8958 * Retrieve adapter's device ID
8960 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8961 /* Want to deal with PF 0 so I strip off PF 4 indicator */
8962 device_id = device_id & 0xf0ff;
8965 * Check PCIE Device ID
8967 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8969 * Change the device ID in the Boot BIOS image to match
8970 * the Device ID of the current adapter.
8972 modify_device_id(device_id, boot_data);
8976 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8977 * we finish copying the rest of the boot image. This will ensure
8978 * that the BIOS boot header will only be written if the boot image
8979 * was written in full.
8982 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8983 addr += SF_PAGE_SIZE;
8984 boot_data += SF_PAGE_SIZE;
8985 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* Write the held-back first page (the header) last, as described above. */
8990 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8991 (const u8 *)header, 0);
8995 CH_ERR(adap, "boot image download failed, error %d\n", ret);
9000 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9001 * @adapter: the adapter
9003 * Return the address within the flash where the OptionROM Configuration
9004 * is stored, or an error if the device FLASH is too small to contain
9005 * a OptionROM Configuration.
9007 static int t4_flash_bootcfg_addr(struct adapter *adapter)
9010 * If the device FLASH isn't large enough to hold a Firmware
9011 * Configuration File, return an error.
9013 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
/* Fixed region: the bootcfg area always begins at FLASH_BOOTCFG_START. */
9016 return FLASH_BOOTCFG_START;
/* Write (or, when size == 0, just erase) the OptionROM Configuration File
 * region of the serial flash; mirrors t4_load_cfg for the bootcfg area. */
9019 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
9021 int ret, i, n, cfg_addr;
9023 unsigned int flash_cfg_start_sec;
9024 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9026 cfg_addr = t4_flash_bootcfg_addr(adap);
9031 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9033 if (size > FLASH_BOOTCFG_MAX_SIZE) {
9034 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9035 FLASH_BOOTCFG_MAX_SIZE);
/* Erase every sector spanned by the maximum-size bootcfg region. */
9039 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9041 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9042 flash_cfg_start_sec + i - 1);
9045 * If size == 0 then we're simply erasing the FLASH sectors associated
9046 * with the on-adapter OptionROM Configuration File.
9048 if (ret || size == 0)
9051 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9052 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9053 if ( (size - i) < SF_PAGE_SIZE)
9057 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9061 addr += SF_PAGE_SIZE;
9062 cfg_data += SF_PAGE_SIZE;
9067 CH_ERR(adap, "boot config data %s failed %d\n",
9068 (size == 0 ? "clear" : "download"), ret);
9073 * t4_set_filter_mode - configure the optional components of filter tuples
9074 * @adap: the adapter
9075 * @mode_map: a bitmap selcting which optional filter components to enable
9077 * Sets the filter mode by selecting the optional components to enable
9078 * in filter tuples. Returns 0 on success and a negative error if the
9079 * requested mode needs more bits than are available for optional
9082 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
/* Per-component bit widths, indexed from S_FCOE through S_FRAGMENTATION. */
9084 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Sum the widths of all requested components; must fit FILTER_OPT_LEN. */
9088 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9089 if (mode_map & (1 << i))
9091 if (nbits > FILTER_OPT_LEN)
/* Write TP_VLAN_PRI_MAP via FW LDST when mandated, else direct TP PIO. */
9093 if (t4_use_ldst(adap))
9094 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
9096 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
9097 1, A_TP_VLAN_PRI_MAP);
/* Refresh the cached filter-mode/ingress-config after the change. */
9098 read_filter_mode_and_ingress_config(adap);
9104 * t4_clr_port_stats - clear port statistics
9105 * @adap: the adapter
9106 * @idx: the port index
9108 * Clear HW statistics for the given port.
9110 void t4_clr_port_stats(struct adapter *adap, int idx)
9113 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* The per-port MPS statistics block lives at a chip-dependent base. */
9117 port_base_addr = PORT_BASE(idx);
9119 port_base_addr = T5_PORT_BASE(idx);
/* Zero the TX then RX stat registers; each counter is a 64-bit L/H pair. */
9121 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9122 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9123 t4_write_reg(adap, port_base_addr + i, 0);
9124 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9125 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9126 t4_write_reg(adap, port_base_addr + i, 0);
/* Also clear the buffer-group drop/truncate counters this port maps to. */
9127 for (i = 0; i < 4; i++)
9128 if (bgmap & (1 << i)) {
9130 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9132 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9137 * t4_i2c_rd - read I2C data from adapter
9138 * @adap: the adapter
9139 * @port: Port number if per-port device; <0 if not
9140 * @devid: per-port device ID or absolute device ID
9141 * @offset: byte offset into device I2C space
9142 * @len: byte length of I2C space data
9143 * @buf: buffer in which to return I2C data
9145 * Reads the I2C data from the indicated device and location.
9147 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9148 int port, unsigned int devid,
9149 unsigned int offset, unsigned int len,
9153 struct fw_ldst_cmd ldst;
/* The FW LDST command can only move up to sizeof(ldst.u.i2c.data) bytes. */
9159 len > sizeof ldst.u.i2c.data)
9162 memset(&ldst, 0, sizeof ldst);
9163 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9164 ldst.op_to_addrspace =
9165 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9169 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff selects a non-per-port (absolute) device. */
9170 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9171 ldst.u.i2c.did = devid;
9172 ldst.u.i2c.boffset = offset;
9173 ldst.u.i2c.blen = len;
/* Reply is written back into the same ldst buffer; copy data out on success. */
9174 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9176 memcpy(buf, ldst.u.i2c.data, len);
9181 * t4_i2c_wr - write I2C data to adapter
9182 * @adap: the adapter
9183 * @port: Port number if per-port device; <0 if not
9184 * @devid: per-port device ID or absolute device ID
9185 * @offset: byte offset into device I2C space
9186 * @len: byte length of I2C space data
9187 * @buf: buffer containing new I2C data
9189 * Write the I2C data to the indicated device and location.
9191 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9192 int port, unsigned int devid,
9193 unsigned int offset, unsigned int len,
9197 struct fw_ldst_cmd ldst;
/* Same payload limit as t4_i2c_rd: one LDST command's data area. */
9202 len > sizeof ldst.u.i2c.data)
9205 memset(&ldst, 0, sizeof ldst);
9206 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9207 ldst.op_to_addrspace =
9208 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9212 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff selects a non-per-port (absolute) device. */
9213 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9214 ldst.u.i2c.did = devid;
9215 ldst.u.i2c.boffset = offset;
9216 ldst.u.i2c.blen = len;
/* Stage the caller's bytes into the command before issuing it. */
9217 memcpy(ldst.u.i2c.data, buf, len);
9218 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9222 * t4_sge_ctxt_rd - read an SGE context through FW
9223 * @adap: the adapter
9224 * @mbox: mailbox to use for the FW command
9225 * @cid: the context id
9226 * @ctype: the context type
9227 * @data: where to store the context data
9229 * Issues a FW command through the given mailbox to read an SGE context.
9231 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9232 enum ctxt_type ctype, u32 *data)
9235 struct fw_ldst_cmd c;
/* Map the context type onto the matching FW LDST address space; 'ret' is
 * reused as a scratch variable here before it carries the mailbox result. */
9237 if (ctype == CTXT_EGRESS)
9238 ret = FW_LDST_ADDRSPC_SGE_EGRC;
9239 else if (ctype == CTXT_INGRESS)
9240 ret = FW_LDST_ADDRSPC_SGE_INGC;
9241 else if (ctype == CTXT_FLM)
9242 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9244 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9246 memset(&c, 0, sizeof(c));
9247 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9248 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9249 V_FW_LDST_CMD_ADDRSPACE(ret));
9250 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9251 c.u.idctxt.physid = cpu_to_be32(cid);
/* On success, unpack the six 32-bit context words for the caller. */
9253 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9255 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9256 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9257 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9258 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9259 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9260 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9266 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9267 * @adap: the adapter
9268 * @cid: the context id
9269 * @ctype: the context type
9270 * @data: where to store the context data
9272 * Reads an SGE context directly, bypassing FW. This is only for
9273 * debugging when FW is unavailable.
9275 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* Kick off the context read, then poll BUSY until the hardware is done. */
9280 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9281 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
/* Copy the context words out of the SGE_CTXT_DATA0..DATA5 registers. */
9283 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9284 *data++ = t4_read_reg(adap, i);
/* Configure a TX scheduler globally (type/minmaxen) via the FW SCHED
 * command on the adapter's own mailbox. */
9288 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9291 struct fw_sched_cmd cmd;
9293 memset(&cmd, 0, sizeof(cmd));
9294 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9297 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* Sub-command CONFIG carries only the type and min/max-enable fields. */
9299 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9300 cmd.u.config.type = type;
9301 cmd.u.config.minmaxen = minmaxen;
9303 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/* Program one TX scheduling class: rate unit/mode, channel, class, min/max
 * rate, weight and packet size, via the FW SCHED PARAMS sub-command. */
9307 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9308 int rateunit, int ratemode, int channel, int cl,
9309 int minrate, int maxrate, int weight, int pktsize,
9312 struct fw_sched_cmd cmd;
9314 memset(&cmd, 0, sizeof(cmd));
9315 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9318 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9320 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9321 cmd.u.params.type = type;
9322 cmd.u.params.level = level;
9323 cmd.u.params.mode = mode;
9324 cmd.u.params.ch = channel;
9325 cmd.u.params.cl = cl;
9326 cmd.u.params.unit = rateunit;
9327 cmd.u.params.rate = ratemode;
/* Multi-byte fields are converted to the FW's big-endian wire format. */
9328 cmd.u.params.min = cpu_to_be32(minrate);
9329 cmd.u.params.max = cpu_to_be32(maxrate);
9330 cmd.u.params.weight = cpu_to_be16(weight);
9331 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9333 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9338 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9339 * @adapter: the adapter
9340 * @mbox: mailbox to use for the FW command
9341 * @pf: the PF owning the queue
9342 * @vf: the VF owning the queue
9343 * @timeout: watchdog timeout in ms
9344 * @action: watchdog timer / action
9346 * There are separate watchdog timers for each possible watchdog
9347 * action. Configure one of the watchdog timers by setting a non-zero
9348 * timeout. Disable a watchdog timer by using a timeout of zero.
9350 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9351 unsigned int pf, unsigned int vf,
9352 unsigned int timeout, unsigned int action)
9354 struct fw_watchdog_cmd wdog;
9358 * The watchdog command expects a timeout in units of 10ms so we need
9359 * to convert it here (via rounding) and force a minimum of one 10ms
9360 * "tick" if the timeout is non-zero but the convertion results in 0
/* Round-to-nearest 10ms; a non-zero request never becomes "disabled". */
9363 ticks = (timeout + 5)/10;
9364 if (timeout && !ticks)
9367 memset(&wdog, 0, sizeof wdog);
9368 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9371 V_FW_PARAMS_CMD_PFN(pf) |
9372 V_FW_PARAMS_CMD_VFN(vf));
9373 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9374 wdog.timeout = cpu_to_be32(ticks);
9375 wdog.action = cpu_to_be32(action);
/* No reply payload is needed, hence the NULL response buffer. */
9377 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
/* Query the firmware device-log verbosity via a read-only DEVLOG command
 * and return it through @level. */
9380 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9382 struct fw_devlog_cmd devlog_cmd;
9385 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9386 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9387 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9388 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9389 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9390 sizeof(devlog_cmd), &devlog_cmd);
/* The reply overwrites devlog_cmd; its level field holds the answer. */
9394 *level = devlog_cmd.level;
9398 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9400 struct fw_devlog_cmd devlog_cmd;
9402 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9403 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9406 devlog_cmd.level = level;
9407 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9408 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9409 sizeof(devlog_cmd), &devlog_cmd);