2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
66 if (!!(val & mask) == polarity) {
78 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
79 int polarity, int attempts, int delay)
81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
86 * t4_set_reg_field - set a register field to a value
87 * @adapter: the adapter to program
88 * @addr: the register address
89 * @mask: specifies the portion of the register to modify
90 * @val: the new value for the register field
92 * Sets a register field specified by the supplied mask to the
95 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
98 u32 v = t4_read_reg(adapter, addr) & ~mask;
100 t4_write_reg(adapter, addr, v | val);
101 (void) t4_read_reg(adapter, addr); /* flush */
105 * t4_read_indirect - read indirectly addressed registers
107 * @addr_reg: register holding the indirect address
108 * @data_reg: register holding the value of the indirect register
109 * @vals: where the read register values are stored
110 * @nregs: how many indirect registers to read
111 * @start_idx: index of first indirect register to read
113 * Reads registers that are accessed indirectly through an address/data
116 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117 unsigned int data_reg, u32 *vals,
118 unsigned int nregs, unsigned int start_idx)
121 t4_write_reg(adap, addr_reg, start_idx);
122 *vals++ = t4_read_reg(adap, data_reg);
128 * t4_write_indirect - write indirectly addressed registers
130 * @addr_reg: register holding the indirect addresses
131 * @data_reg: register holding the value for the indirect registers
132 * @vals: values to write
133 * @nregs: how many indirect registers to write
134 * @start_idx: address of first indirect register to write
136 * Writes a sequential block of registers that are accessed indirectly
137 * through an address/data register pair.
139 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140 unsigned int data_reg, const u32 *vals,
141 unsigned int nregs, unsigned int start_idx)
144 t4_write_reg(adap, addr_reg, start_idx++);
145 t4_write_reg(adap, data_reg, *vals++);
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism. This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 * the backdoor registers on a regular basis and we can end up
157 * conflicting with it's uses!
159 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
161 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
164 if (chip_id(adap) <= CHELSIO_T5)
172 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
173 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
176 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
177 * Configuration Space read. (None of the other fields matter when
178 * F_ENABLE is 0 so a simple register write is easier than a
179 * read-modify-write via t4_set_reg_field().)
181 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 * t4_report_fw_error - report firmware error
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
194 static void t4_report_fw_error(struct adapter *adap)
196 static const char *const reason[] = {
197 "Crash", /* PCIE_FW_EVAL_CRASH */
198 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
199 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
200 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
201 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
203 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
204 "Reserved", /* reserved */
208 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209 if (pcie_fw & F_PCIE_FW_ERR)
210 CH_ERR(adap, "Firmware reports adapter error: %s\n",
211 reason[G_PCIE_FW_EVAL(pcie_fw)]);
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
217 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
220 for ( ; nflit; nflit--, mbox_addr += 8)
221 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
225 * Handle a FW assertion reported in a mailbox.
227 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
230 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231 asrt->u.assert.filename_0_7,
232 be32_to_cpu(asrt->u.assert.line),
233 be32_to_cpu(asrt->u.assert.x),
234 be32_to_cpu(asrt->u.assert.y));
237 #define X_CIM_PF_NOACCESS 0xeeeeeeee
239 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
241 * @mbox: index of the mailbox to use
242 * @cmd: the command to write
243 * @size: command length in bytes
244 * @rpl: where to optionally store the reply
245 * @sleep_ok: if true we may sleep while awaiting command completion
246 * @timeout: time to wait for command to finish before timing out
247 * (negative implies @sleep_ok=false)
249 * Sends the given command to FW through the selected mailbox and waits
250 * for the FW to execute the command. If @rpl is not %NULL it is used to
251 * store the FW's reply to the command. The command and its optional
252 * reply are of the same length. Some FW commands like RESET and
253 * INITIALIZE can take a considerable amount of time to execute.
254 * @sleep_ok determines whether we may sleep while awaiting the response.
255 * If sleeping is allowed we use progressive backoff otherwise we spin.
256 * Note that passing in a negative @timeout is an alternate mechanism
257 * for specifying @sleep_ok=false. This is useful when a higher level
258 * interface allows for specification of @timeout but not @sleep_ok ...
260 * The return value is 0 on success or a negative errno on failure. A
261 * failure can happen either because we are not able to execute the
262 * command or FW executes it but signals an error. In the latter case
263 * the return value is the error code indicated by FW (negated).
265 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
266 int size, void *rpl, bool sleep_ok, int timeout)
269 * We delay in small increments at first in an effort to maintain
270 * responsiveness for simple, fast executing commands but then back
271 * off to larger delays to a maximum retry delay.
273 static const int delay[] = {
274 1, 1, 3, 5, 10, 10, 20, 50, 100
278 int i, ms, delay_idx, ret;
279 const __be64 *p = cmd;
280 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
281 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
283 __be64 cmd_rpl[MBOX_LEN/8];
286 if ((size & 15) || size > MBOX_LEN)
289 if (adap->flags & IS_VF) {
291 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
293 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
294 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
298 * If we have a negative timeout, that implies that we can't sleep.
306 * Attempt to gain access to the mailbox.
308 for (i = 0; i < 4; i++) {
309 ctl = t4_read_reg(adap, ctl_reg);
311 if (v != X_MBOWNER_NONE)
316 * If we were unable to gain access, dequeue ourselves from the
317 * mailbox atomic access list and report the error to our caller.
319 if (v != X_MBOWNER_PL) {
320 t4_report_fw_error(adap);
321 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
326 * If we gain ownership of the mailbox and there's a "valid" message
327 * in it, this is likely an asynchronous error message from the
328 * firmware. So we'll report that and then proceed on with attempting
329 * to issue our own command ... which may well fail if the error
330 * presaged the firmware crashing ...
332 if (ctl & F_MBMSGVALID) {
333 CH_ERR(adap, "found VALID command in mbox %u: "
334 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
335 (unsigned long long)t4_read_reg64(adap, data_reg),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
338 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
339 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
340 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
341 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
342 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
346 * Copy in the new mailbox command and send it on its way ...
348 for (i = 0; i < size; i += 8, p++)
349 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
351 if (adap->flags & IS_VF) {
353 * For the VFs, the Mailbox Data "registers" are
354 * actually backed by T4's "MA" interface rather than
355 * PL Registers (as is the case for the PFs). Because
356 * these are in different coherency domains, the write
357 * to the VF's PL-register-backed Mailbox Control can
358 * race in front of the writes to the MA-backed VF
359 * Mailbox Data "registers". So we need to do a
360 * read-back on at least one byte of the VF Mailbox
361 * Data registers before doing the write to the VF
362 * Mailbox Control register.
364 t4_read_reg(adap, data_reg);
367 CH_DUMP_MBOX(adap, mbox, data_reg);
369 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
370 t4_read_reg(adap, ctl_reg); /* flush write */
376 * Loop waiting for the reply; bail out if we time out or the firmware
380 for (i = 0; i < timeout; i += ms) {
381 if (!(adap->flags & IS_VF)) {
382 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
383 if (pcie_fw & F_PCIE_FW_ERR)
387 ms = delay[delay_idx]; /* last element may repeat */
388 if (delay_idx < ARRAY_SIZE(delay) - 1)
395 v = t4_read_reg(adap, ctl_reg);
396 if (v == X_CIM_PF_NOACCESS)
398 if (G_MBOWNER(v) == X_MBOWNER_PL) {
399 if (!(v & F_MBMSGVALID)) {
400 t4_write_reg(adap, ctl_reg,
401 V_MBOWNER(X_MBOWNER_NONE));
406 * Retrieve the command reply and release the mailbox.
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
409 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
411 CH_DUMP_MBOX(adap, mbox, data_reg);
413 res = be64_to_cpu(cmd_rpl[0]);
414 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
415 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
416 res = V_FW_CMD_RETVAL(EIO);
418 memcpy(rpl, cmd_rpl, size);
419 return -G_FW_CMD_RETVAL((int)res);
424 * We timed out waiting for a reply to our mailbox command. Report
425 * the error and also check to see if the firmware reported any
428 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
429 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
430 *(const u8 *)cmd, mbox);
432 /* If DUMP_MBOX is set the mbox has already been dumped */
433 if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
435 CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
436 "%016llx %016llx %016llx %016llx\n",
437 (unsigned long long)be64_to_cpu(p[0]),
438 (unsigned long long)be64_to_cpu(p[1]),
439 (unsigned long long)be64_to_cpu(p[2]),
440 (unsigned long long)be64_to_cpu(p[3]),
441 (unsigned long long)be64_to_cpu(p[4]),
442 (unsigned long long)be64_to_cpu(p[5]),
443 (unsigned long long)be64_to_cpu(p[6]),
444 (unsigned long long)be64_to_cpu(p[7]));
447 t4_report_fw_error(adap);
452 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
453 void *rpl, bool sleep_ok)
455 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
456 sleep_ok, FW_CMD_MAX_TIMEOUT);
460 static int t4_edc_err_read(struct adapter *adap, int idx)
462 u32 edc_ecc_err_addr_reg;
463 u32 edc_bist_status_rdata_reg;
466 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
469 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
470 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
474 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
475 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
478 "edc%d err addr 0x%x: 0x%x.\n",
479 idx, edc_ecc_err_addr_reg,
480 t4_read_reg(adap, edc_ecc_err_addr_reg));
482 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
483 edc_bist_status_rdata_reg,
484 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
485 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
486 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
487 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
488 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
489 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
490 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
491 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
492 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
498 * t4_mc_read - read from MC through backdoor accesses
500 * @idx: which MC to access
501 * @addr: address of first byte requested
502 * @data: 64 bytes of data containing the requested address
503 * @ecc: where to store the corresponding 64-bit ECC word
505 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
506 * that covers the requested address @addr. If @parity is not %NULL it
507 * is assigned the 64-bit ECC word for the read data.
509 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
512 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
513 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
516 mc_bist_cmd_reg = A_MC_BIST_CMD;
517 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
518 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
519 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
520 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
522 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
523 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
524 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
525 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
527 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
531 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
533 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
534 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
535 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
536 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
537 F_START_BIST | V_BIST_CMD_GAP(1));
538 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
542 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
544 for (i = 15; i >= 0; i--)
545 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
547 *ecc = t4_read_reg64(adap, MC_DATA(16));
553 * t4_edc_read - read from EDC through backdoor accesses
555 * @idx: which EDC to access
556 * @addr: address of first byte requested
557 * @data: 64 bytes of data containing the requested address
558 * @ecc: where to store the corresponding 64-bit ECC word
560 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
561 * that covers the requested address @addr. If @parity is not %NULL it
562 * is assigned the 64-bit ECC word for the read data.
564 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
567 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
568 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
571 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
572 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
573 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
574 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
576 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
580 * These macro are missing in t4_regs.h file.
581 * Added temporarily for testing.
583 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
584 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
585 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
586 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
587 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
588 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
590 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
596 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
598 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
599 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
600 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
601 t4_write_reg(adap, edc_bist_cmd_reg,
602 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
603 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
607 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
609 for (i = 15; i >= 0; i--)
610 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
612 *ecc = t4_read_reg64(adap, EDC_DATA(16));
618 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
620 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
621 * @addr: address within indicated memory type
622 * @len: amount of memory to read
623 * @buf: host memory buffer
625 * Reads an [almost] arbitrary memory region in the firmware: the
626 * firmware memory address, length and host buffer must be aligned on
627 * 32-bit boudaries. The memory is returned as a raw byte sequence from
628 * the firmware's memory. If this memory contains data structures which
629 * contain multi-byte integers, it's the callers responsibility to
630 * perform appropriate byte order conversions.
632 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
635 u32 pos, start, end, offset;
639 * Argument sanity checks ...
641 if ((addr & 0x3) || (len & 0x3))
645 * The underlaying EDC/MC read routines read 64 bytes at a time so we
646 * need to round down the start and round up the end. We'll start
647 * copying out of the first line at (addr - start) a word at a time.
649 start = addr & ~(64-1);
650 end = (addr + len + 64-1) & ~(64-1);
651 offset = (addr - start)/sizeof(__be32);
653 for (pos = start; pos < end; pos += 64, offset = 0) {
657 * Read the chip's memory block and bail if there's an error.
659 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
660 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
662 ret = t4_edc_read(adap, mtype, pos, data, NULL);
667 * Copy the data into the caller's memory buffer.
669 while (offset < 16 && len > 0) {
670 *buf++ = data[offset++];
671 len -= sizeof(__be32);
679 * Return the specified PCI-E Configuration Space register from our Physical
680 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
681 * since we prefer to let the firmware own all of these registers, but if that
682 * fails we go for it directly ourselves.
684 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
688 * If fw_attach != 0, construct and send the Firmware LDST Command to
689 * retrieve the specified PCI-E Configuration Space register.
691 if (drv_fw_attach != 0) {
692 struct fw_ldst_cmd ldst_cmd;
695 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
696 ldst_cmd.op_to_addrspace =
697 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
700 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
701 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
702 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
703 ldst_cmd.u.pcie.ctrl_to_fn =
704 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
705 ldst_cmd.u.pcie.r = reg;
708 * If the LDST Command succeeds, return the result, otherwise
709 * fall through to reading it directly ourselves ...
711 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
714 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
716 CH_WARN(adap, "Firmware failed to return "
717 "Configuration Space register %d, err = %d\n",
722 * Read the desired Configuration Space register via the PCI-E
723 * Backdoor mechanism.
725 return t4_hw_pci_read_cfg4(adap, reg);
729 * t4_get_regs_len - return the size of the chips register set
730 * @adapter: the adapter
732 * Returns the size of the chip's BAR0 register space.
734 unsigned int t4_get_regs_len(struct adapter *adapter)
736 unsigned int chip_version = chip_id(adapter);
738 switch (chip_version) {
740 if (adapter->flags & IS_VF)
741 return FW_T4VF_REGMAP_SIZE;
742 return T4_REGMAP_SIZE;
746 if (adapter->flags & IS_VF)
747 return FW_T4VF_REGMAP_SIZE;
748 return T5_REGMAP_SIZE;
752 "Unsupported chip version %d\n", chip_version);
757 * t4_get_regs - read chip registers into provided buffer
759 * @buf: register buffer
760 * @buf_size: size (in bytes) of register buffer
762 * If the provided register buffer isn't large enough for the chip's
763 * full register range, the register dump will be truncated to the
764 * register buffer's size.
766 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
/*
 * NOTE(review): the PF register-range tables below (t4/t5/t6_reg_ranges) are
 * truncated in this view; their contents live outside the visible span.
 * Each table is a flat list of (first_reg, last_reg) pairs consumed two at
 * a time by the copy loop at the bottom of this function.
 */
768 static const unsigned int t4_reg_ranges[] = {
1227 static const unsigned int t4vf_reg_ranges[] = {
1228 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1229 VF_MPS_REG(A_MPS_VF_CTL),
1230 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1231 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1232 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1233 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1234 FW_T4VF_MBDATA_BASE_ADDR,
1235 FW_T4VF_MBDATA_BASE_ADDR +
1236 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1239 static const unsigned int t5_reg_ranges[] = {
2006 static const unsigned int t5vf_reg_ranges[] = {
2007 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2008 VF_MPS_REG(A_MPS_VF_CTL),
2009 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2010 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2011 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2012 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2013 FW_T4VF_MBDATA_BASE_ADDR,
2014 FW_T4VF_MBDATA_BASE_ADDR +
2015 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2018 static const unsigned int t6_reg_ranges[] = {
2579 static const unsigned int t6vf_reg_ranges[] = {
2580 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2581 VF_MPS_REG(A_MPS_VF_CTL),
2582 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2583 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2584 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2585 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2586 FW_T6VF_MBDATA_BASE_ADDR,
2587 FW_T6VF_MBDATA_BASE_ADDR +
2588 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2591 u32 *buf_end = (u32 *)(buf + buf_size);
2592 const unsigned int *reg_ranges;
2593 int reg_ranges_size, range;
2594 unsigned int chip_version = chip_id(adap);
2597 * Select the right set of register ranges to dump depending on the
2598 * adapter chip type.
2600 switch (chip_version) {
2602 if (adap->flags & IS_VF) {
2603 reg_ranges = t4vf_reg_ranges;
2604 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2606 reg_ranges = t4_reg_ranges;
2607 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2612 if (adap->flags & IS_VF) {
2613 reg_ranges = t5vf_reg_ranges;
2614 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2616 reg_ranges = t5_reg_ranges;
2617 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2622 if (adap->flags & IS_VF) {
2623 reg_ranges = t6vf_reg_ranges;
2624 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2626 reg_ranges = t6_reg_ranges;
2627 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2633 "Unsupported chip version %d\n", chip_version);
2638 * Clear the register buffer and insert the appropriate register
2639 * values selected by the above register ranges.
2641 memset(buf, 0, buf_size);
2642 for (range = 0; range < reg_ranges_size; range += 2) {
2643 unsigned int reg = reg_ranges[range];
2644 unsigned int last_reg = reg_ranges[range + 1];
/*
 * NOTE(review): each register value is written into the buffer at the
 * byte offset equal to its register address, so the dump is sparse.
 */
2645 u32 *bufp = (u32 *)(buf + reg);
2648 * Iterate across the register range filling in the register
2649 * buffer but don't write past the end of the register buffer.
2651 while (reg <= last_reg && bufp < buf_end) {
2652 *bufp++ = t4_read_reg(adap, reg);
2659 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2671 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2673 #define EEPROM_DELAY 10 /* 10us per poll spin */
2674 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2676 #define EEPROM_STAT_ADDR 0x7bfc
2677 #define VPD_SIZE 0x800
2678 #define VPD_BASE 0x400
2679 #define VPD_BASE_OLD 0
2680 #define VPD_LEN 1024
2681 #define VPD_INFO_FLD_HDR_SIZE 3
2682 #define CHELSIO_VPD_UNIQUE_ID 0x82
2685 * Small utility function to wait till any outstanding VPD Access is complete.
2686 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2687 * VPD Access in flight. This allows us to handle the problem of having a
2688 * previous VPD Access time out and prevent an attempt to inject a new VPD
2689 * Request before any in-flight VPD reguest has completed.
2691 static int t4_seeprom_wait(struct adapter *adapter)
2693 unsigned int base = adapter->params.pci.vpd_cap_addr;
2697 * If no VPD Access is in flight, we can just return success right
2700 if (!adapter->vpd_busy)
2704 * Poll the VPD Capability Address/Flag register waiting for it
2705 * to indicate that the operation is complete.
2707 max_poll = EEPROM_MAX_POLL;
2711 udelay(EEPROM_DELAY);
2712 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2715 * If the operation is complete, mark the VPD as no longer
2716 * busy and return success.
2718 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2719 adapter->vpd_busy = 0;
2722 } while (--max_poll);
2725 * Failure! Note that we leave the VPD Busy status set in order to
2726 * avoid pushing a new VPD Access request into the VPD Capability till
2727 * the current operation eventually succeeds. It's a bug to issue a
2728 * new request when an existing request is in flight and will result
2729 * in corrupt hardware state.
2735 * t4_seeprom_read - read a serial EEPROM location
2736 * @adapter: adapter to read
2737 * @addr: EEPROM virtual address
2738 * @data: where to store the read data
2740 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2741 * VPD capability. Note that this function must be called with a virtual
2744 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2746 unsigned int base = adapter->params.pci.vpd_cap_addr;
2750 * VPD Accesses must alway be 4-byte aligned!
2752 if (addr >= EEPROMVSIZE || (addr & 3))
2756 * Wait for any previous operation which may still be in flight to
2759 ret = t4_seeprom_wait(adapter);
2761 CH_ERR(adapter, "VPD still busy from previous operation\n");
2766 * Issue our new VPD Read request, mark the VPD as being busy and wait
2767 * for our request to complete. If it doesn't complete, note the
2768 * error and return it to our caller. Note that we do not reset the
2771 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2772 adapter->vpd_busy = 1;
2773 adapter->vpd_flag = PCI_VPD_ADDR_F;
2774 ret = t4_seeprom_wait(adapter);
2776 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2781 * Grab the returned data, swizzle it into our endianess and
2784 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2785 *data = le32_to_cpu(*data);
2790 * t4_seeprom_write - write a serial EEPROM location
2791 * @adapter: adapter to write
2792 * @addr: virtual EEPROM address
2793 * @data: value to write
2795 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2796 * VPD capability. Note that this function must be called with a virtual
2799 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2801 unsigned int base = adapter->params.pci.vpd_cap_addr;
2807 * VPD Accesses must alway be 4-byte aligned!
2809 if (addr >= EEPROMVSIZE || (addr & 3))
2813 * Wait for any previous operation which may still be in flight to
2816 ret = t4_seeprom_wait(adapter);
2818 CH_ERR(adapter, "VPD still busy from previous operation\n");
2823 * Issue our new VPD Read request, mark the VPD as being busy and wait
2824 * for our request to complete. If it doesn't complete, note the
2825 * error and return it to our caller. Note that we do not reset the
2828 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2830 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2831 (u16)addr | PCI_VPD_ADDR_F);
2832 adapter->vpd_busy = 1;
2833 adapter->vpd_flag = 0;
2834 ret = t4_seeprom_wait(adapter);
2836 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2841 * Reset PCI_VPD_DATA register after a transaction and wait for our
2842 * request to complete. If it doesn't complete, return error.
2844 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2845 max_poll = EEPROM_MAX_POLL;
2847 udelay(EEPROM_DELAY);
2848 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2849 } while ((stats_reg & 0x1) && --max_poll);
2853 /* Return success! */
2858 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2859 * @phys_addr: the physical EEPROM address
2860 * @fn: the PCI function number
2861 * @sz: size of function-specific area
2863 * Translate a physical EEPROM address to virtual. The first 1K is
2864 * accessed through virtual addresses starting at 31K, the rest is
2865 * accessed through virtual addresses starting at 0.
2867 * The mapping is as follows:
2868 * [0..1K) -> [31K..32K)
2869 * [1K..1K+A) -> [ES-A..ES)
2870 * [1K+A..ES) -> [0..ES-A-1K)
2872 * where A = @fn * @sz, and ES = EEPROM size.
2874 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2877 if (phys_addr < 1024)
2878 return phys_addr + (31 << 10);
2879 if (phys_addr < 1024 + fn)
2880 return EEPROMSIZE - fn + phys_addr - 1024;
2881 if (phys_addr < EEPROMSIZE)
2882 return phys_addr - 1024 - fn;
2887 * t4_seeprom_wp - enable/disable EEPROM write protection
2888 * @adapter: the adapter
2889 * @enable: whether to enable or disable write protection
2891 * Enables or disables write protection on the serial EEPROM.
2893 int t4_seeprom_wp(struct adapter *adapter, int enable)
2895 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2899 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2900 * @v: Pointer to buffered vpd data structure
2901 * @kw: The keyword to search for
2903 * Returns the value of the information field keyword or
2904 * -ENOENT otherwise.
2906 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2909 unsigned int offset , len;
2910 const u8 *buf = (const u8 *)v;
2911 const u8 *vpdr_len = &v->vpdr_len[0];
2912 offset = sizeof(struct t4_vpd_hdr);
2913 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2915 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2919 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2920 if(memcmp(buf + i , kw , 2) == 0){
2921 i += VPD_INFO_FLD_HDR_SIZE;
2925 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2933 * get_vpd_params - read VPD parameters from VPD EEPROM
2934 * @adapter: adapter to read
2935 * @p: where to store the parameters
2936 * @vpd: caller provided temporary space to read the VPD into
2938 * Reads card parameters stored in VPD EEPROM.
2940 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
/* NOTE(review): this extraction is missing source lines (the embedded
 * original line numbers jump), so the body below is incomplete; it is kept
 * verbatim with comments only added.  The function reads the VPD area of
 * the serial EEPROM, locates the keyword fields and copies them into *p. */
2946 const struct t4_vpd_hdr *v;
/* Probe VPD_BASE first to detect where the VPD actually begins. */
2949 * Card information normally starts at VPD_BASE but early cards had
2952 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2957 * The VPD shall have a unique identifier specified by the PCI SIG.
2958 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2959 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2960 * is expected to automatically put this entry at the
2961 * beginning of the VPD.
2963 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Read the whole VPD region into the caller-provided buffer, 4B at a time. */
2965 for (i = 0; i < VPD_LEN; i += 4) {
2966 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2970 v = (const struct t4_vpd_hdr *)vpd;
/* FIND_VPD_KW: locate keyword 'name' in the buffered VPD via
 * get_vpd_keyword_val(); logs and bails out if the keyword is absent.
 * "RV" is the checksum field: the byte sum up to and including it must
 * be zero for a valid VPD. */
2972 #define FIND_VPD_KW(var,name) do { \
2973 var = get_vpd_keyword_val(v , name); \
2975 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2980 FIND_VPD_KW(i, "RV");
2981 for (csum = 0; i >= 0; i--)
2986 "corrupted VPD EEPROM, actual csum %u\n", csum);
2990 FIND_VPD_KW(ec, "EC");
2991 FIND_VPD_KW(sn, "SN");
2992 FIND_VPD_KW(pn, "PN");
2993 FIND_VPD_KW(na, "NA");
/* Copy each located field into vpd_params; the byte at offset 2 of a
 * field header is that field's data length (see get_vpd_keyword_val). */
2996 memcpy(p->id, v->id_data, ID_LEN);
2998 memcpy(p->ec, vpd + ec, EC_LEN);
3000 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3001 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3003 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3004 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3005 strstrip((char *)p->pn);
3006 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3007 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3008 strstrip((char *)p->na);
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program 256B page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3028 * sf1_read - read data from the serial flash
3029 * @adapter: the adapter
3030 * @byte_cnt: number of bytes to read
3031 * @cont: whether another operation will be chained
3032 * @lock: whether to lock SF for PL access only
3033 * @valp: where to store the read data
3035 * Reads up to 4 bytes of data from the serial flash. The location of
3036 * the read needs to be specified prior to calling this by issuing the
3037 * appropriate commands to the serial flash.
3039 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3040 int lock, u32 *valp)
3044 if (!byte_cnt || byte_cnt > 4)
3046 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3048 t4_write_reg(adapter, A_SF_OP,
3049 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3050 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3052 *valp = t4_read_reg(adapter, A_SF_DATA);
3057 * sf1_write - write data to the serial flash
3058 * @adapter: the adapter
3059 * @byte_cnt: number of bytes to write
3060 * @cont: whether another operation will be chained
3061 * @lock: whether to lock SF for PL access only
3062 * @val: value to write
3064 * Writes up to 4 bytes of data to the serial flash. The location of
3065 * the write needs to be specified prior to calling this by issuing the
3066 * appropriate commands to the serial flash.
3068 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3071 if (!byte_cnt || byte_cnt > 4)
3073 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3075 t4_write_reg(adapter, A_SF_DATA, val);
3076 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3077 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3078 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3082 * flash_wait_op - wait for a flash operation to complete
3083 * @adapter: the adapter
3084 * @attempts: max number of polls of the status register
3085 * @delay: delay between polls in ms
3087 * Wait for a flash operation to complete by polling the status register.
3089 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3095 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3096 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3100 if (--attempts == 0)
3108 * t4_read_flash - read words from serial flash
3109 * @adapter: the adapter
3110 * @addr: the start address for the read
3111 * @nwords: how many 32-bit words to read
3112 * @data: where to store the read data
3113 * @byte_oriented: whether to store data as bytes or as words
3115 * Read the specified number of 32-bit words from the serial flash.
3116 * If @byte_oriented is set the read data is stored as a byte array
3117 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3118 * natural endianness.
3120 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3121 unsigned int nwords, u32 *data, int byte_oriented)
3125 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3128 addr = swab32(addr) | SF_RD_DATA_FAST;
3130 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3131 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3134 for ( ; nwords; nwords--, data++) {
3135 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3137 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3141 *data = (__force __u32)(cpu_to_be32(*data));
3147 * t4_write_flash - write up to a page of data to the serial flash
3148 * @adapter: the adapter
3149 * @addr: the start address to write
3150 * @n: length of data to write in bytes
3151 * @data: the data to write
3152 * @byte_oriented: whether to store data as bytes or as words
3154 * Writes up to a page of data (256 bytes) to the serial flash starting
3155 * at the given address. All the data must be written to the same page.
3156 * If @byte_oriented is set the write data is stored as byte stream
3157 * (i.e. matches what on disk), otherwise in big-endian.
3159 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3160 unsigned int n, const u8 *data, int byte_oriented)
3163 u32 buf[SF_PAGE_SIZE / 4];
3164 unsigned int i, c, left, val, offset = addr & 0xff;
3166 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3169 val = swab32(addr) | SF_PROG_PAGE;
3171 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3172 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3175 for (left = n; left; left -= c) {
3177 for (val = 0, i = 0; i < c; ++i)
3178 val = (val << 8) + *data++;
3181 val = cpu_to_be32(val);
3183 ret = sf1_write(adapter, c, c != left, 1, val);
3187 ret = flash_wait_op(adapter, 8, 1);
3191 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3193 /* Read the page to verify the write succeeded */
3194 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3199 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3201 "failed to correctly write the flash page at %#x\n",
3208 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3213 * t4_get_fw_version - read the firmware version
3214 * @adapter: the adapter
3215 * @vers: where to place the version
3217 * Reads the FW version from flash.
3219 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3221 return t4_read_flash(adapter, FLASH_FW_START +
3222 offsetof(struct fw_hdr, fw_ver), 1,
3227 * t4_get_bs_version - read the firmware bootstrap version
3228 * @adapter: the adapter
3229 * @vers: where to place the version
3231 * Reads the FW Bootstrap version from flash.
3233 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3235 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3236 offsetof(struct fw_hdr, fw_ver), 1,
3241 * t4_get_tp_version - read the TP microcode version
3242 * @adapter: the adapter
3243 * @vers: where to place the version
3245 * Reads the TP microcode version from flash.
3247 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3249 return t4_read_flash(adapter, FLASH_FW_START +
3250 offsetof(struct fw_hdr, tp_microcode_ver),
3255 * t4_get_exprom_version - return the Expansion ROM version (if any)
3256 * @adapter: the adapter
3257 * @vers: where to place the version
3259 * Reads the Expansion ROM header from FLASH and returns the version
3260 * number (if present) through the @vers return value pointer. We return
3261 * this in the Firmware Version Format since it's convenient. Return
3262 * 0 on success, -ENOENT if no Expansion ROM is present.
3264 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3266 struct exprom_header {
3267 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3268 unsigned char hdr_ver[4]; /* Expansion ROM version */
3270 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3274 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3275 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3280 hdr = (struct exprom_header *)exprom_header_buf;
3281 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3284 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3285 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3286 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3287 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3292 * t4_get_scfg_version - return the Serial Configuration version
3293 * @adapter: the adapter
3294 * @vers: where to place the version
3296 * Reads the Serial Configuration Version via the Firmware interface
3297 * (thus this can only be called once we're ready to issue Firmware
3298 * commands). The format of the Serial Configuration version is
3299 * adapter specific. Returns 0 on success, an error on failure.
3301 * Note that early versions of the Firmware didn't include the ability
3302 * to retrieve the Serial Configuration version, so we zero-out the
3303 * return-value parameter in that case to avoid leaving it with
3306 * Also note that the Firmware will return its cached copy of the Serial
3307 * Initialization Revision ID, not the actual Revision ID as written in
3308 * the Serial EEPROM. This is only an issue if a new VPD has been written
3309 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3310 * it's best to defer calling this routine till after a FW_RESET_CMD has
3311 * been issued if the Host Driver will be performing a full adapter
3314 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3319 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3320 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3321 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3322 1, &scfgrev_param, vers);
3329 * t4_get_vpd_version - return the VPD version
3330 * @adapter: the adapter
3331 * @vers: where to place the version
3333 * Reads the VPD via the Firmware interface (thus this can only be called
3334 * once we're ready to issue Firmware commands). The format of the
3335 * VPD version is adapter specific. Returns 0 on success, an error on
3338 * Note that early versions of the Firmware didn't include the ability
3339 * to retrieve the VPD version, so we zero-out the return-value parameter
3340 * in that case to avoid leaving it with garbage in it.
3342 * Also note that the Firmware will return its cached copy of the VPD
3343 * Revision ID, not the actual Revision ID as written in the Serial
3344 * EEPROM. This is only an issue if a new VPD has been written and the
3345 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3346 * to defer calling this routine till after a FW_RESET_CMD has been issued
3347 * if the Host Driver will be performing a full adapter initialization.
3349 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3354 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3355 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3356 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3357 1, &vpdrev_param, vers);
3364 * t4_get_version_info - extract various chip/firmware version information
3365 * @adapter: the adapter
3367 * Reads various chip/firmware version numbers and stores them into the
3368 * adapter Adapter Parameters structure. If any of the efforts fails
3369 * the first failure will be returned, but all of the version numbers
3372 int t4_get_version_info(struct adapter *adapter)
3376 #define FIRST_RET(__getvinfo) \
3378 int __ret = __getvinfo; \
3379 if (__ret && !ret) \
3383 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3384 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3385 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3386 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3387 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3388 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3396 * t4_flash_erase_sectors - erase a range of flash sectors
3397 * @adapter: the adapter
3398 * @start: the first sector to erase
3399 * @end: the last sector to erase
3401 * Erases the sectors in the given inclusive range.
3403 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3407 if (end >= adapter->params.sf_nsec)
3410 while (start <= end) {
3411 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3412 (ret = sf1_write(adapter, 4, 0, 1,
3413 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3414 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3416 "erase of flash sector %d failed, error %d\n",
3422 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3427 * t4_flash_cfg_addr - return the address of the flash configuration file
3428 * @adapter: the adapter
3430 * Return the address within the flash where the Firmware Configuration
3431 * File is stored, or an error if the device FLASH is too small to contain
3432 * a Firmware Configuration File.
3434 int t4_flash_cfg_addr(struct adapter *adapter)
3437 * If the device FLASH isn't large enough to hold a Firmware
3438 * Configuration File, return an error.
3440 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3443 return FLASH_CFG_START;
3447 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3448 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3449 * and emit an error message for mismatched firmware to save our caller the
3452 static int t4_fw_matches_chip(struct adapter *adap,
3453 const struct fw_hdr *hdr)
3456 * The expression below will return FALSE for any unsupported adapter
3457 * which will keep us "honest" in the future ...
3459 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3460 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3461 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3465 "FW image (%d) is not suitable for this adapter (%d)\n",
3466 hdr->chip, chip_id(adap));
3471 * t4_load_fw - download firmware
3472 * @adap: the adapter
3473 * @fw_data: the firmware image to write
3476 * Write the supplied firmware image to the card's serial flash.
3478 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3483 u8 first_page[SF_PAGE_SIZE];
3484 const u32 *p = (const u32 *)fw_data;
3485 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3486 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3487 unsigned int fw_start_sec;
3488 unsigned int fw_start;
3489 unsigned int fw_size;
3491 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3492 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3493 fw_start = FLASH_FWBOOTSTRAP_START;
3494 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3496 fw_start_sec = FLASH_FW_START_SEC;
3497 fw_start = FLASH_FW_START;
3498 fw_size = FLASH_FW_MAX_SIZE;
3502 CH_ERR(adap, "FW image has no data\n");
3507 "FW image size not multiple of 512 bytes\n");
3510 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3512 "FW image size differs from size in FW header\n");
3515 if (size > fw_size) {
3516 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3520 if (!t4_fw_matches_chip(adap, hdr))
3523 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3524 csum += be32_to_cpu(p[i]);
3526 if (csum != 0xffffffff) {
3528 "corrupted firmware image, checksum %#x\n", csum);
3532 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3533 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3538 * We write the correct version at the end so the driver can see a bad
3539 * version if the FW write fails. Start by writing a copy of the
3540 * first page with a bad version.
3542 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3543 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3544 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3549 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3550 addr += SF_PAGE_SIZE;
3551 fw_data += SF_PAGE_SIZE;
3552 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3557 ret = t4_write_flash(adap,
3558 fw_start + offsetof(struct fw_hdr, fw_ver),
3559 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3562 CH_ERR(adap, "firmware download failed, error %d\n",
3568 * t4_fwcache - firmware cache operation
3569 * @adap: the adapter
3570 * @op : the operation (flush or flush and invalidate)
3572 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3574 struct fw_params_cmd c;
3576 memset(&c, 0, sizeof(c));
3578 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3579 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3580 V_FW_PARAMS_CMD_PFN(adap->pf) |
3581 V_FW_PARAMS_CMD_VFN(0));
3582 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3584 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3585 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3586 c.param[0].val = (__force __be32)op;
3588 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3591 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3592 unsigned int *pif_req_wrptr,
3593 unsigned int *pif_rsp_wrptr)
3596 u32 cfg, val, req, rsp;
3598 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3599 if (cfg & F_LADBGEN)
3600 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3602 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3603 req = G_POLADBGWRPTR(val);
3604 rsp = G_PILADBGWRPTR(val);
3606 *pif_req_wrptr = req;
3608 *pif_rsp_wrptr = rsp;
3610 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3611 for (j = 0; j < 6; j++) {
3612 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3613 V_PILADBGRDPTR(rsp));
3614 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3615 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3619 req = (req + 2) & M_POLADBGRDPTR;
3620 rsp = (rsp + 2) & M_PILADBGRDPTR;
3622 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3625 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3630 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3631 if (cfg & F_LADBGEN)
3632 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3634 for (i = 0; i < CIM_MALA_SIZE; i++) {
3635 for (j = 0; j < 5; j++) {
3637 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3638 V_PILADBGRDPTR(idx));
3639 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3640 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3643 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3646 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3650 for (i = 0; i < 8; i++) {
3651 u32 *p = la_buf + i;
3653 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3654 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3655 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3656 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3657 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3662 * t4_link_l1cfg - apply link configuration to MAC/PHY
3663 * @phy: the PHY to setup
3664 * @mac: the MAC to setup
3665 * @lc: the requested link configuration
3667 * Set up a port's MAC and PHY according to a desired link configuration.
3668 * - If the PHY can auto-negotiate first decide what to advertise, then
3669 * enable/disable auto-negotiation as desired, and reset.
3670 * - If the PHY does not auto-negotiate just reset it.
3671 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3672 * otherwise do it later based on the outcome of auto-negotiation.
3674 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3675 struct link_config *lc)
/* NOTE(review): lines are missing from this extraction (embedded original
 * line numbers jump), including the `case` labels of the speed switch and
 * several braces; the code is kept verbatim, comments only added. */
3677 struct fw_port_cmd c;
3678 unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3679 unsigned int aneg, fc, fec, speed, rcap;
/* translate requested pause settings into FW port capability bits */
3682 if (lc->requested_fc & PAUSE_RX)
3683 fc |= FW_PORT_CAP_FC_RX;
3684 if (lc->requested_fc & PAUSE_TX)
3685 fc |= FW_PORT_CAP_FC_TX;
/* translate requested FEC into FW port capability bits */
3688 if (lc->requested_fec & FEC_RS)
3689 fec = FW_PORT_CAP_FEC_RS;
3690 else if (lc->requested_fec & FEC_BASER_RS)
3691 fec = FW_PORT_CAP_FEC_BASER_RS;
/* no autoneg (unsupported or disabled): pick a single fixed speed;
 * otherwise advertise all supported speeds and let aneg decide */
3693 if (!(lc->supported & FW_PORT_CAP_ANEG) ||
3694 lc->requested_aneg == AUTONEG_DISABLE) {
3696 switch (lc->requested_speed) {
3698 speed = FW_PORT_CAP_SPEED_100G;
3701 speed = FW_PORT_CAP_SPEED_40G;
3704 speed = FW_PORT_CAP_SPEED_25G;
3707 speed = FW_PORT_CAP_SPEED_10G;
3710 speed = FW_PORT_CAP_SPEED_1G;
3713 speed = FW_PORT_CAP_SPEED_100M;
3720 aneg = FW_PORT_CAP_ANEG;
3721 speed = lc->supported &
3722 V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED);
/* never request capabilities the port doesn't support */
3725 rcap = aneg | speed | fc | fec | mdi;
3726 if ((rcap | lc->supported) != lc->supported) {
3728 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3731 rcap &= lc->supported;
/* issue the L1 configure command to the firmware */
3734 memset(&c, 0, sizeof(c));
3735 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3736 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3737 V_FW_PORT_CMD_PORTID(port));
3739 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3741 c.u.l1cfg.rcap = cpu_to_be32(rcap);
/* _ns variant: may be called from contexts that cannot sleep */
3743 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3747 * t4_restart_aneg - restart autonegotiation
3748 * @adap: the adapter
3749 * @mbox: mbox to use for the FW command
3750 * @port: the port id
3752 * Restarts autonegotiation for the selected port.
3754 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3756 struct fw_port_cmd c;
3758 memset(&c, 0, sizeof(c));
3759 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3760 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3761 V_FW_PORT_CMD_PORTID(port));
3763 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3765 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3766 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
typedef void (*int_handler_t)(struct adapter *adap);

/* one entry of an interrupt-cause decoding table (see t4_handle_intr_status) */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
3780 * t4_handle_intr_status - table driven interrupt handler
3781 * @adapter: the adapter that generated the interrupt
3782 * @reg: the interrupt status register to process
3783 * @acts: table of interrupt actions
3785 * A table driven interrupt handler that applies a set of masks to an
3786 * interrupt status word and performs the corresponding actions if the
3787 * interrupts described by the mask have occurred. The actions include
3788 * optionally emitting a warning or alert message. The table is terminated
3789 * by an entry specifying mask 0. Returns the number of fatal interrupt
3792 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3793 const struct intr_info *acts)
3796 unsigned int mask = 0;
3797 unsigned int status = t4_read_reg(adapter, reg);
3799 for ( ; acts->mask; ++acts) {
3800 if (!(status & acts->mask))
3804 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3805 status & acts->mask);
3806 } else if (acts->msg)
3807 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3808 status & acts->mask);
3809 if (acts->int_handler)
3810 acts->int_handler(adapter);
3814 if (status) /* clear processed interrupts */
3815 t4_write_reg(adapter, reg, status);
3820 * Interrupt handler for the PCIE module.
3822 static void pcie_intr_handler(struct adapter *adapter)
/* NOTE(review): lines are missing from this extraction (embedded original
 * line numbers jump): array terminators, some table-entry continuations,
 * local declarations and braces.  The code is kept verbatim below, comments
 * only added.  Each table maps a cause-register bit to a message/fatality. */
3824 static const struct intr_info sysbus_intr_info[] = {
3825 { F_RNPP, "RXNP array parity error", -1, 1 },
3826 { F_RPCP, "RXPC array parity error", -1, 1 },
3827 { F_RCIP, "RXCIF array parity error", -1, 1 },
3828 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3829 { F_RFTP, "RXFT array parity error", -1, 1 },
3832 static const struct intr_info pcie_port_intr_info[] = {
3833 { F_TPCP, "TXPC array parity error", -1, 1 },
3834 { F_TNPP, "TXNP array parity error", -1, 1 },
3835 { F_TFTP, "TXFT array parity error", -1, 1 },
3836 { F_TCAP, "TXCA array parity error", -1, 1 },
3837 { F_TCIP, "TXCIF array parity error", -1, 1 },
3838 { F_RCAP, "RXCA array parity error", -1, 1 },
3839 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3840 { F_RDPE, "Rx data parity error", -1, 1 },
3841 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
/* T4-only decoding of A_PCIE_INT_CAUSE */
3844 static const struct intr_info pcie_intr_info[] = {
3845 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3846 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3847 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3848 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3849 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3850 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3851 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3852 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3853 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3854 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3855 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3856 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3857 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3858 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3859 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3860 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3861 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3862 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3863 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3864 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3865 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3866 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3867 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3868 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3869 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3870 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3871 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3872 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3873 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3874 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
/* T5/T6 decoding of A_PCIE_INT_CAUSE */
3879 static const struct intr_info t5_pcie_intr_info[] = {
3880 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3882 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3883 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3884 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3885 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3886 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3887 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3888 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3890 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3892 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3893 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3894 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3895 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3896 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3898 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3899 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3900 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3901 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3902 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3903 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3904 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3905 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3906 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3907 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3908 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3910 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3912 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3913 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3914 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3915 { F_READRSPERR, "Outbound read error", -1,
/* T4 also has the UTL system-bus and port status registers to decode;
 * T5/T6 only decode A_PCIE_INT_CAUSE. */
3923 fat = t4_handle_intr_status(adapter,
3924 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3926 t4_handle_intr_status(adapter,
3927 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3928 pcie_port_intr_info) +
3929 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3932 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
/* any fatal cause brings the adapter down */
3935 t4_fatal_err(adapter);
3939 * TP interrupt handler.
3941 static void tp_intr_handler(struct adapter *adapter)
3943 static const struct intr_info tp_intr_info[] = {
3944 { 0x3fffffff, "TP parity error", -1, 1 },
3945 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3949 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3950 t4_fatal_err(adapter);
3954 * SGE interrupt handler.
3956 static void sge_intr_handler(struct adapter *adapter)
/* NOTE(review): lines are missing from this extraction (embedded original
 * line numbers jump): local declarations, table terminators, some entry
 * continuations and braces.  Code kept verbatim, comments only added. */
3961 static const struct intr_info sge_intr_info[] = {
3962 { F_ERR_CPL_EXCEED_IQE_SIZE,
3963 "SGE received CPL exceeding IQE size", -1, 1 },
3964 { F_ERR_INVALID_CIDX_INC,
3965 "SGE GTS CIDX increment too large", -1, 0 },
3966 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3967 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3968 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3969 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3970 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3972 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3974 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3976 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3978 { F_ERR_ING_CTXT_PRIO,
3979 "SGE too many priority ingress contexts", -1, 0 },
3980 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3981 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3982 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
3983 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
3984 "SGE PCIe error for a DBP thread", -1, 0 },
/* extra causes that only exist on T4/T5 */
3988 static const struct intr_info t4t5_sge_intr_info[] = {
3989 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3990 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3991 { F_ERR_EGR_CTXT_PRIO,
3992 "SGE too many priority egress contexts", -1, 0 },
3997 * For now, treat below interrupts as fatal so that we disable SGE and
3998 * get better debug */
3999 static const struct intr_info t6_sge_intr_info[] = {
4001 "SGE Actual WRE packet is less than advertized length",
/* CAUSE1/CAUSE2 carry parity errors; log, then write-1-to-clear */
4006 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4007 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4009 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4010 (unsigned long long)v);
4011 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4012 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
/* decode CAUSE3 with the common table plus the chip-specific one */
4015 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4016 if (chip_id(adapter) <= CHELSIO_T5)
4017 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4018 t4t5_sge_intr_info);
4020 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
/* report (and clear) the per-queue error capture register */
4023 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4024 if (err & F_ERROR_QID_VALID) {
4025 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4026 if (err & F_UNCAPTURED_ERROR)
4027 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4028 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4029 F_UNCAPTURED_ERROR);
4033 t4_fatal_err(adapter);
/* Aggregate parity-error masks for the CIM outbound (OBQ) and inbound (IBQ)
 * queues, used by the cause table in cim_intr_handler(). */
4036 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4037 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4038 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4039 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4042 * CIM interrupt handler.
/*
 * CIM interrupt handler.  Reports firmware errors latched in PCIE_FW,
 * suppresses a TIMER0 interrupt unless it accompanies a firmware crash
 * (the FW forces TIMER0 to signal a crash to the host), then dispatches
 * the host and uP-access cause tables and escalates fatal causes.
 */
4044 static void cim_intr_handler(struct adapter *adapter)
4046 static const struct intr_info cim_intr_info[] = {
4047 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4048 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4049 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4050 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4051 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4052 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4053 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4054 { F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
/* Illegal uP accesses to the various CIM address spaces. */
4057 static const struct intr_info cim_upintr_info[] = {
4058 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4059 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4060 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4061 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4062 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4063 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4064 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4065 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4066 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4067 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4068 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4069 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4070 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4071 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4072 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4073 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4074 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4075 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4076 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4077 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4078 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4079 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4080 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4081 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4082 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4083 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4084 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4085 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4091 fw_err = t4_read_reg(adapter, A_PCIE_FW);
4092 if (fw_err & F_PCIE_FW_ERR)
4093 t4_report_fw_error(adapter);
4095 /* When the Firmware detects an internal error which normally wouldn't
4096 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4097 * to make sure the Host sees the Firmware Crash. So if we have a
4098 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4101 val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
4102 if (val & F_TIMER0INT)
4103 if (!(fw_err & F_PCIE_FW_ERR) ||
4104 (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
4105 t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
4108 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4110 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4113 t4_fatal_err(adapter);
4117 * ULP RX interrupt handler.
/*
 * ULP RX interrupt handler: decode A_ULP_RX_INT_CAUSE via the table and
 * treat any flagged cause as fatal.
 */
4119 static void ulprx_intr_handler(struct adapter *adapter)
4121 static const struct intr_info ulprx_intr_info[] = {
4122 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4123 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4124 { 0x7fffff, "ULPRX parity error", -1, 1 },
4128 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4129 t4_fatal_err(adapter);
4133 * ULP TX interrupt handler.
/*
 * ULP TX interrupt handler: decode A_ULP_TX_INT_CAUSE (per-channel PBL
 * bound errors and parity) and treat any flagged cause as fatal.
 */
4135 static void ulptx_intr_handler(struct adapter *adapter)
4137 static const struct intr_info ulptx_intr_info[] = {
4138 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4140 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4142 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4144 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4146 { 0xfffffff, "ULPTX parity error", -1, 1 },
4150 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4151 t4_fatal_err(adapter);
4155 * PM TX interrupt handler.
/*
 * PM TX interrupt handler: decode A_PM_TX_INT_CAUSE (pcmd overflow,
 * framing, parity causes) and treat any flagged cause as fatal.
 */
4157 static void pmtx_intr_handler(struct adapter *adapter)
4159 static const struct intr_info pmtx_intr_info[] = {
4160 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4161 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4162 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4163 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4164 { 0xffffff0, "PMTX framing error", -1, 1 },
4165 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4166 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4168 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4169 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4173 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4174 t4_fatal_err(adapter);
4178 * PM RX interrupt handler.
/*
 * PM RX interrupt handler: decode A_PM_RX_INT_CAUSE (0-length pcmd,
 * framing, parity causes) and treat any flagged cause as fatal.
 */
4180 static void pmrx_intr_handler(struct adapter *adapter)
4182 static const struct intr_info pmrx_intr_info[] = {
4183 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4184 { 0x3ffff0, "PMRX framing error", -1, 1 },
4185 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4186 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4188 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4189 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4193 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4194 t4_fatal_err(adapter);
4198 * CPL switch interrupt handler.
/*
 * CPL switch interrupt handler: decode A_CPL_INTR_CAUSE and treat any
 * flagged cause as fatal.
 */
4200 static void cplsw_intr_handler(struct adapter *adapter)
4202 static const struct intr_info cplsw_intr_info[] = {
4203 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4204 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4205 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4206 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4207 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4208 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4212 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4213 t4_fatal_err(adapter);
4217 * LE interrupt handler.
/*
 * LE (Lookup Engine) interrupt handler.  The cause bit layout changed on
 * T6, so a chip-specific table is selected; the T4/T5 table is used for
 * chip_ver <= CHELSIO_T5, otherwise the T6 table.
 */
4219 static void le_intr_handler(struct adapter *adap)
4221 unsigned int chip_ver = chip_id(adap);
4222 static const struct intr_info le_intr_info[] = {
4223 { F_LIPMISS, "LE LIP miss", -1, 0 },
4224 { F_LIP0, "LE 0 LIP error", -1, 0 },
4225 { F_PARITYERR, "LE parity error", -1, 1 },
4226 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4227 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4231 static const struct intr_info t6_le_intr_info[] = {
4232 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4233 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4234 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4235 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4236 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4240 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4241 (chip_ver <= CHELSIO_T5) ?
4242 le_intr_info : t6_le_intr_info))
4247 * MPS interrupt handler.
/*
 * MPS interrupt handler.  Dispatches one cause table per MPS sub-block
 * (Rx, Tx, TRC, statistics SRAM/Tx/Rx FIFOs, classifier), accumulates
 * whether anything fatal was reported, clears the top-level MPS cause
 * register, and escalates fatal causes.
 */
4249 static void mps_intr_handler(struct adapter *adapter)
4251 static const struct intr_info mps_rx_intr_info[] = {
4252 { 0xffffff, "MPS Rx parity error", -1, 1 },
4255 static const struct intr_info mps_tx_intr_info[] = {
4256 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4257 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4258 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4260 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4262 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4263 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4264 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4267 static const struct intr_info mps_trc_intr_info[] = {
4268 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4269 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4271 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4274 static const struct intr_info mps_stat_sram_intr_info[] = {
4275 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4278 static const struct intr_info mps_stat_tx_intr_info[] = {
4279 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4282 static const struct intr_info mps_stat_rx_intr_info[] = {
4283 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4286 static const struct intr_info mps_cls_intr_info[] = {
4287 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4288 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4289 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum the per-sub-block fatal counts; non-zero means fatal. */
4295 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4297 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4299 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4300 mps_trc_intr_info) +
4301 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4302 mps_stat_sram_intr_info) +
4303 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4304 mps_stat_tx_intr_info) +
4305 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4306 mps_stat_rx_intr_info) +
4307 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4310 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4311 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4313 t4_fatal_err(adapter);
/* Memory (EDC/MC) interrupt causes handled by mem_intr_handler().
 * NOTE(review): the continuation line of this macro is not visible in
 * this extract. */
4316 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4320 * EDC/MC interrupt handler.
/*
 * EDC/MC interrupt handler for memory controller @idx (MEM_EDC0, MEM_EDC1,
 * MEM_MC/MC0, MEM_MC1).  Selects the cause/ECC-status register pair for the
 * controller, rate-limit-warns on correctable ECC errors (clearing the HW
 * counter), and treats parity or uncorrectable ECC errors as fatal.
 */
4322 static void mem_intr_handler(struct adapter *adapter, int idx)
4324 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4326 unsigned int addr, cnt_addr, v;
/* Pick the register pair for the requested memory controller. */
4328 if (idx <= MEM_EDC1) {
4329 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4330 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4331 } else if (idx == MEM_MC) {
4332 if (is_t4(adapter)) {
4333 addr = A_MC_INT_CAUSE;
4334 cnt_addr = A_MC_ECC_STATUS;
4336 addr = A_MC_P_INT_CAUSE;
4337 cnt_addr = A_MC_P_ECC_STATUS;
4340 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4341 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4344 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4345 if (v & F_PERR_INT_CAUSE)
4346 CH_ALERT(adapter, "%s FIFO parity error\n",
4348 if (v & F_ECC_CE_INT_CAUSE) {
4349 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4351 if (idx <= MEM_EDC1)
4352 t4_edc_err_read(adapter, idx);
/* Writing all-ones to the CECNT field clears the HW counter. */
4354 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4355 CH_WARN_RATELIMIT(adapter,
4356 "%u %s correctable ECC data error%s\n",
4357 cnt, name[idx], cnt > 1 ? "s" : "");
4359 if (v & F_ECC_UE_INT_CAUSE)
4361 "%s uncorrectable ECC data error\n", name[idx]);
4363 t4_write_reg(adapter, addr, v);
4364 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4365 t4_fatal_err(adapter);
4369 * MA interrupt handler.
/*
 * MA (Memory Arbiter) interrupt handler.  Reports parity-error status and
 * address wrap-around errors (decoding the offending client and address),
 * clears the cause register, and always escalates to t4_fatal_err().
 */
4371 static void ma_intr_handler(struct adapter *adapter)
4373 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4375 if (status & F_MEM_PERR_INT_CAUSE) {
4377 "MA parity error, parity status %#x\n",
4378 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4381 "MA parity error, parity status %#x\n",
4382 t4_read_reg(adapter,
4383 A_MA_PARITY_ERROR_STATUS2));
4385 if (status & F_MEM_WRAP_INT_CAUSE) {
4386 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4387 CH_ALERT(adapter, "MA address wrap-around error by "
4388 "client %u to address %#x\n",
4389 G_MEM_WRAP_CLIENT_NUM(v),
4390 G_MEM_WRAP_ADDRESS(v) << 4);
4392 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4393 t4_fatal_err(adapter);
4397 * SMB interrupt handler.
/*
 * SMB interrupt handler: decode A_SMB_INT_CAUSE via the table.
 * NOTE(review): the statement taken when a cause is flagged is not
 * visible in this extract — confirm against the full file.
 */
4399 static void smb_intr_handler(struct adapter *adap)
4401 static const struct intr_info smb_intr_info[] = {
4402 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4403 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4404 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4408 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4413 * NC-SI interrupt handler.
/*
 * NC-SI interrupt handler: decode A_NCSI_INT_CAUSE via the table.
 * NOTE(review): the statement taken when a cause is flagged is not
 * visible in this extract — confirm against the full file.
 */
4415 static void ncsi_intr_handler(struct adapter *adap)
4417 static const struct intr_info ncsi_intr_info[] = {
4418 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4419 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4420 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4421 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4425 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4430 * XGMAC interrupt handler.
/*
 * XGMAC interrupt handler for @port.  Selects the T4 vs. T5+ port cause
 * register, masks down to the Tx/Rx FIFO parity-error bits, logs any that
 * are set, and writes them back to clear.
 */
4432 static void xgmac_intr_handler(struct adapter *adap, int port)
4434 u32 v, int_cause_reg;
4437 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4439 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4441 v = t4_read_reg(adap, int_cause_reg);
/* Only the FIFO parity errors are handled here. */
4443 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4447 if (v & F_TXFIFO_PRTY_ERR)
4448 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4450 if (v & F_RXFIFO_PRTY_ERR)
4451 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4453 t4_write_reg(adap, int_cause_reg, v);
4458 * PL interrupt handler.
/*
 * PL interrupt handler.  T4 also has a VFID_MAP parity cause, so the
 * cause table is chosen per chip generation before dispatching
 * A_PL_PL_INT_CAUSE.
 */
4460 static void pl_intr_handler(struct adapter *adap)
4462 static const struct intr_info pl_intr_info[] = {
4463 { F_FATALPERR, "Fatal parity error", -1, 1 },
4464 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4468 static const struct intr_info t5_pl_intr_info[] = {
4469 { F_FATALPERR, "Fatal parity error", -1, 1 },
4473 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4475 pl_intr_info : t5_pl_intr_info))
/* PF-level interrupt sources enabled in t4_intr_enable(). */
4479 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4482 * t4_slow_intr_handler - control path interrupt handler
4483 * @adapter: the adapter
4485 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4486 * The designation 'slow' is because it involves register reads, while
4487 * data interrupts typically don't involve any MMIOs.
/*
 * t4_slow_intr_handler - control path ("slow") interrupt dispatcher.
 * Reads the top-level PL_INT_CAUSE, returns early when none of the global
 * causes we own are set, calls the per-module handler for each asserted
 * cause bit, then clears the bits it handled and flushes with a read-back.
 */
4489 int t4_slow_intr_handler(struct adapter *adapter)
4491 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4493 if (!(cause & GLBL_INTR_MASK))
4496 cim_intr_handler(adapter);
4498 mps_intr_handler(adapter);
4500 ncsi_intr_handler(adapter);
4502 pl_intr_handler(adapter);
4504 smb_intr_handler(adapter);
4506 xgmac_intr_handler(adapter, 0);
4508 xgmac_intr_handler(adapter, 1);
4510 xgmac_intr_handler(adapter, 2);
4512 xgmac_intr_handler(adapter, 3);
4514 pcie_intr_handler(adapter);
4516 mem_intr_handler(adapter, MEM_MC);
/* Only T5 has a second memory controller behind F_MC1. */
4517 if (is_t5(adapter) && (cause & F_MC1))
4518 mem_intr_handler(adapter, MEM_MC1);
4520 mem_intr_handler(adapter, MEM_EDC0);
4522 mem_intr_handler(adapter, MEM_EDC1);
4524 le_intr_handler(adapter);
4526 tp_intr_handler(adapter);
4528 ma_intr_handler(adapter);
4529 if (cause & F_PM_TX)
4530 pmtx_intr_handler(adapter);
4531 if (cause & F_PM_RX)
4532 pmrx_intr_handler(adapter);
4533 if (cause & F_ULP_RX)
4534 ulprx_intr_handler(adapter);
4535 if (cause & F_CPL_SWITCH)
4536 cplsw_intr_handler(adapter);
4538 sge_intr_handler(adapter);
4539 if (cause & F_ULP_TX)
4540 ulptx_intr_handler(adapter);
4542 /* Clear the interrupts just processed for which we are the master. */
4543 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4544 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4549 * t4_intr_enable - enable interrupts
4550 * @adapter: the adapter whose interrupts should be enabled
4552 * Enable PF-specific interrupts for the calling function and the top-level
4553 * interrupt concentrator for global interrupts. Interrupts are already
4554 * enabled at each module, here we just enable the roots of the interrupt
4557 * Note: this function should be called only when the driver manages
4558 * non PF-specific interrupts from the various HW modules. Only one PCI
4559 * function at a time should be doing this.
/*
 * t4_intr_enable - enable top-level interrupts for the calling PF.
 * Programs SGE_INT_ENABLE3 with the common SGE causes plus a per-chip
 * extra set, enables the PF-specific sources (PF_INTR_MASK), and maps
 * this PF into PL_INT_MAP0.  Should only be run by the PF that manages
 * global (non PF-specific) interrupts.
 */
4561 void t4_intr_enable(struct adapter *adapter)
4564 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
/* The PF number field moved/widened on T6. */
4565 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4566 ? G_SOURCEPF(whoami)
4567 : G_T6_SOURCEPF(whoami));
4569 if (chip_id(adapter) <= CHELSIO_T5)
4570 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4572 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4573 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4574 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4575 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4576 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4577 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4578 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4579 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4580 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4581 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4585 * t4_intr_disable - disable interrupts
4586 * @adapter: the adapter whose interrupts should be disabled
4588 * Disable interrupts. We only disable the top-level interrupt
4589 * concentrators. The caller must be a PCI function managing global
/*
 * t4_intr_disable - disable top-level interrupts for the calling PF:
 * clears the PF interrupt-enable register and removes this PF from
 * PL_INT_MAP0 (the inverse of t4_intr_enable()).
 */
4592 void t4_intr_disable(struct adapter *adapter)
4594 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4595 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4596 ? G_SOURCEPF(whoami)
4597 : G_T6_SOURCEPF(whoami));
4599 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4600 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4604 * t4_intr_clear - clear all interrupts
4605 * @adapter: the adapter whose interrupts should be cleared
4607 * Clears all interrupts. The caller must be a PCI function managing
4608 * global interrupts.
/*
 * t4_intr_clear - clear all pending module interrupt causes by writing
 * all-ones to each cause register, handle the T4-only PCIe-core status
 * registers, then clear the global PL_INT_CAUSE and flush.
 */
4610 void t4_intr_clear(struct adapter *adapter)
4612 static const unsigned int cause_reg[] = {
4613 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4614 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4615 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4616 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4617 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4618 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4620 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4621 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4622 A_MPS_RX_PERR_INT_CAUSE,
4624 MYPF_REG(A_PL_PF_INT_CAUSE),
4631 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4632 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
/* The MC cause register address differs between T4 and later chips. */
4634 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4635 A_MC_P_INT_CAUSE, 0xffffffff);
4637 if (is_t4(adapter)) {
4638 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4640 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4643 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4645 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4646 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4650 * hash_mac_addr - return the hash value of a MAC address
4651 * @addr: the 48-bit Ethernet MAC address
4653 * Hashes a MAC address according to the hash function used by HW inexact
4654 * (hash) address matching.
/*
 * hash_mac_addr - hash a 48-bit MAC address the way the HW inexact
 * (hash) address-match logic does.  Packs the address into two 24-bit
 * halves @a (bytes 0-2) and @b (bytes 3-5).
 * NOTE(review): the folding/return statements are not visible in this
 * extract — confirm the reduction against the full file.
 */
4656 static int hash_mac_addr(const u8 *addr)
4658 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4659 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4667 * t4_config_rss_range - configure a portion of the RSS mapping table
4668 * @adapter: the adapter
4669 * @mbox: mbox to use for the FW command
4670 * @viid: virtual interface whose RSS subtable is to be written
4671 * @start: start entry in the table to write
4672 * @n: how many table entries to write
4673 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4674 * @nrspq: number of values in @rspq
4676 * Programs the selected part of the VI's RSS mapping table with the
4677 * provided values. If @nrspq < @n the supplied values are used repeatedly
4678 * until the full table range is populated.
4680 * The caller must ensure the values in @rspq are in the range allowed for
/*
 * t4_config_rss_range - program part of a VI's RSS indirection table.
 * Sends FW_RSS_IND_TBL_CMD mailbox commands, each carrying up to 32
 * Ingress Queue IDs packed three 10-bit values per 32-bit word.  When
 * @nrspq < @n the @rspq values are reused cyclically (rsp wraps to rspq)
 * until @n entries starting at @start have been written.  Returns 0 or a
 * negative mailbox error.
 */
4683 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4684 int start, int n, const u16 *rspq, unsigned int nrspq)
4687 const u16 *rsp = rspq;
4688 const u16 *rsp_end = rspq + nrspq;
4689 struct fw_rss_ind_tbl_cmd cmd;
4691 memset(&cmd, 0, sizeof(cmd));
4692 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4693 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4694 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4695 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4698 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4699 * Queue Identifiers. These Ingress Queue IDs are packed three to
4700 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4704 int nq = min(n, 32);
4706 __be32 *qp = &cmd.iq0_to_iq2;
4709 * Set up the firmware RSS command header to send the next
4710 * "nq" Ingress Queue IDs to the firmware.
4712 cmd.niqid = cpu_to_be16(nq);
4713 cmd.startidx = cpu_to_be16(start);
4716 * "nq" more done for the start of the next loop.
4722 * While there are still Ingress Queue IDs to stuff into the
4723 * current firmware RSS command, retrieve them from the
4724 * Ingress Queue ID array and insert them into the command.
4728 * Grab up to the next 3 Ingress Queue IDs (wrapping
4729 * around the Ingress Queue ID array if necessary) and
4730 * insert them into the firmware RSS command at the
4731 * current 3-tuple position within the commad.
4735 int nqbuf = min(3, nq);
4738 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4739 while (nqbuf && nq_packed < 32) {
4746 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4747 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4748 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4752 * Send this portion of the RRS table update to the firmware;
4753 * bail out on any errors.
4755 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4763 * t4_config_glbl_rss - configure the global RSS mode
4764 * @adapter: the adapter
4765 * @mbox: mbox to use for the FW command
4766 * @mode: global RSS mode
4767 * @flags: mode-specific flags
4769 * Sets the global RSS mode.
/*
 * t4_config_glbl_rss - set the global RSS mode via FW_RSS_GLB_CONFIG_CMD.
 * Only MANUAL and BASICVIRTUAL modes populate mode-specific fields;
 * @flags is only used in BASICVIRTUAL mode.  Returns the mailbox result.
 */
4771 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4774 struct fw_rss_glb_config_cmd c;
4776 memset(&c, 0, sizeof(c));
4777 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4778 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4779 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4780 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4781 c.u.manual.mode_pkd =
4782 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4783 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4784 c.u.basicvirtual.mode_keymode =
4785 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4786 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4789 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4793 * t4_config_vi_rss - configure per VI RSS settings
4794 * @adapter: the adapter
4795 * @mbox: mbox to use for the FW command
4798 * @defq: id of the default RSS queue for the VI.
4799 * @skeyidx: RSS secret key table index for non-global mode
4800 * @skey: RSS vf_scramble key for VI.
4802 * Configures VI-specific RSS properties.
/*
 * t4_config_vi_rss - configure per-VI RSS properties (default queue,
 * secret key index and scramble key) via FW_RSS_VI_CONFIG_CMD.
 * Returns the mailbox result.
 */
4804 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4805 unsigned int flags, unsigned int defq, unsigned int skeyidx,
4808 struct fw_rss_vi_config_cmd c;
4810 memset(&c, 0, sizeof(c));
4811 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4812 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4813 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4814 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4815 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4816 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4817 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4818 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4819 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4821 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4824 /* Read an RSS table row */
4825 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4827 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4828 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4833 * t4_read_rss - read the contents of the RSS mapping table
4834 * @adapter: the adapter
4835 * @map: holds the contents of the RSS mapping table
4837 * Reads the contents of the RSS hash->queue mapping table.
/*
 * t4_read_rss - read the whole RSS mapping table into @map.  Each HW row
 * holds two queue entries, so RSS_NENTRIES/2 rows are read and unpacked
 * into consecutive @map slots.
 */
4839 int t4_read_rss(struct adapter *adapter, u16 *map)
4844 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4845 ret = rd_rss_row(adapter, i, &val);
4848 *map++ = G_LKPTBLQUEUE0(val);
4849 *map++ = G_LKPTBLQUEUE1(val);
4855 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
4856 * @adap: the adapter
4857 * @cmd: TP fw ldst address space type
4858 * @vals: where the indirect register values are stored/written
4859 * @nregs: how many indirect registers to read/write
4860 * @start_idx: index of first indirect register to read/write
4861 * @rw: Read (1) or Write (0)
4862 * @sleep_ok: if true we may sleep while awaiting command completion
4864 * Access TP indirect registers through LDST
/*
 * t4_tp_fw_ldst_rw - access @nregs TP indirect registers through firmware
 * LDST commands, one mailbox command per register.  @rw selects read (1,
 * results stored into @vals) or write (0, values taken from @vals);
 * @cmd is the FW_LDST address-space selector.
 */
4866 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
4867 unsigned int nregs, unsigned int start_index,
4868 unsigned int rw, bool sleep_ok)
4872 struct fw_ldst_cmd c;
4874 for (i = 0; i < nregs; i++) {
4875 memset(&c, 0, sizeof(c));
4876 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4878 (rw ? F_FW_CMD_READ :
4880 V_FW_LDST_CMD_ADDRSPACE(cmd));
4881 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4883 c.u.addrval.addr = cpu_to_be32(start_index + i);
4884 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4885 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* On a read, the firmware echoes the register value back. */
4891 vals[i] = be32_to_cpu(c.u.addrval.val);
4897 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
4898 * @adap: the adapter
4899 * @reg_addr: Address Register
4900 * @reg_data: Data register
4901 * @buff: where the indirect register values are stored/written
4902 * @nregs: how many indirect registers to read/write
4903 * @start_index: index of first indirect register to read/write
4904 * @rw: READ(1) or WRITE(0)
4905 * @sleep_ok: if true we may sleep while awaiting command completion
4907 * Read/Write TP indirect registers through LDST if possible.
4908 * Else, use backdoor access
/*
 * t4_tp_indirect_rw - read/write TP indirect registers, preferring the
 * firmware LDST path when available (mapping @reg_addr to the matching
 * FW_LDST address space) and falling back to direct backdoor access via
 * t4_read_indirect()/t4_write_indirect().
 */
4910 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
4911 u32 *buff, u32 nregs, u32 start_index, int rw,
4919 cmd = FW_LDST_ADDRSPC_TP_PIO;
4921 case A_TP_TM_PIO_ADDR:
4922 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
4924 case A_TP_MIB_INDEX:
4925 cmd = FW_LDST_ADDRSPC_TP_MIB;
/* Unknown address register: no LDST space, use backdoor access. */
4928 goto indirect_access;
4931 if (t4_use_ldst(adap))
4932 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
4939 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
4942 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
4948 * t4_tp_pio_read - Read TP PIO registers
4949 * @adap: the adapter
4950 * @buff: where the indirect register values are written
4951 * @nregs: how many indirect registers to read
4952 * @start_index: index of first indirect register to read
4953 * @sleep_ok: if true we may sleep while awaiting command completion
4955 * Read TP PIO Registers
/* Read @nregs TP PIO registers starting at @start_index into @buff
 * (thin wrapper around t4_tp_indirect_rw with rw=1). */
4957 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4958 u32 start_index, bool sleep_ok)
4960 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
4961 start_index, 1, sleep_ok);
4965 * t4_tp_pio_write - Write TP PIO registers
4966 * @adap: the adapter
4967 * @buff: where the indirect register values are stored
4968 * @nregs: how many indirect registers to write
4969 * @start_index: index of first indirect register to write
4970 * @sleep_ok: if true we may sleep while awaiting command completion
4972 * Write TP PIO Registers
/* Write @nregs TP PIO registers starting at @start_index from @buff
 * (rw=0; __DECONST is safe because the write path never modifies buff). */
4974 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
4975 u32 start_index, bool sleep_ok)
4977 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4978 __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
4982 * t4_tp_tm_pio_read - Read TP TM PIO registers
4983 * @adap: the adapter
4984 * @buff: where the indirect register values are written
4985 * @nregs: how many indirect registers to read
4986 * @start_index: index of first indirect register to read
4987 * @sleep_ok: if true we may sleep while awaiting command completion
4989 * Read TP TM PIO Registers
/* Read @nregs TP TM PIO registers starting at @start_index into @buff. */
4991 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4992 u32 start_index, bool sleep_ok)
4994 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
4995 nregs, start_index, 1, sleep_ok);
4999 * t4_tp_mib_read - Read TP MIB registers
5000 * @adap: the adapter
5001 * @buff: where the indirect register values are written
5002 * @nregs: how many indirect registers to read
5003 * @start_index: index of first indirect register to read
5004 * @sleep_ok: if true we may sleep while awaiting command completion
5006 * Read TP MIB Registers
/* Read @nregs TP MIB registers starting at @start_index into @buff. */
5008 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5011 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5012 start_index, 1, sleep_ok);
5016 * t4_read_rss_key - read the global RSS key
5017 * @adap: the adapter
5018 * @key: 10-entry array holding the 320-bit RSS key
5019 * @sleep_ok: if true we may sleep while awaiting command completion
5021 * Reads the global 320-bit RSS key.
/* Read the global 320-bit RSS key (ten 32-bit words) into @key. */
5023 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5025 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5029 * t4_write_rss_key - program one of the RSS keys
5030 * @adap: the adapter
5031 * @key: 10-entry array holding the 320-bit RSS key
5032 * @idx: which RSS key to write
5033 * @sleep_ok: if true we may sleep while awaiting command completion
5035 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5036 * 0..15 the corresponding entry in the RSS key table is written,
5037 * otherwise the global RSS key is written.
/*
 * t4_write_rss_key - program a 320-bit RSS key.  Writes the ten key words
 * through TP PIO, then, when 0 <= @idx < the key-table size, latches them
 * into table entry @idx via TP_RSS_CONFIG_VRT.  On T6+ with KEYMODE 3 and
 * KEYEXTEND set, 32 key slots are addressable (idx[5:4] goes into
 * KEYWRADDRX); otherwise 16.
 */
5039 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5042 u8 rss_key_addr_cnt = 16;
5043 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5046 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5047 * allows access to key addresses 16-63 by using KeyWrAddrX
5048 * as index[5:4](upper 2) into key table
5050 if ((chip_id(adap) > CHELSIO_T5) &&
5051 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5052 rss_key_addr_cnt = 32;
5054 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5056 if (idx >= 0 && idx < rss_key_addr_cnt) {
5057 if (rss_key_addr_cnt > 16)
5058 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5059 vrt | V_KEYWRADDRX(idx >> 4) |
5060 V_T6_VFWRADDR(idx) | F_KEYWREN)(
5062 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5063 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5068 * t4_read_rss_pf_config - read PF RSS Configuration Table
5069 * @adapter: the adapter
5070 * @index: the entry in the PF RSS table to read
5071 * @valp: where to store the returned value
5072 * @sleep_ok: if true we may sleep while awaiting command completion
5074 * Reads the PF RSS Configuration Table at the specified index and returns
5075 * the value found there.
/* Read PF RSS Configuration Table entry @index into *@valp. */
5077 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5078 u32 *valp, bool sleep_ok)
5080 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5084 * t4_write_rss_pf_config - write PF RSS Configuration Table
5085 * @adapter: the adapter
5086 * @index: the entry in the VF RSS table to read
5087 * @val: the value to store
5088 * @sleep_ok: if true we may sleep while awaiting command completion
5090 * Writes the PF RSS Configuration Table at the specified index with the
5093 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5094 u32 val, bool sleep_ok)
5096 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5101 * t4_read_rss_vf_config - read VF RSS Configuration Table
5102 * @adapter: the adapter
5103 * @index: the entry in the VF RSS table to read
5104 * @vfl: where to store the returned VFL
5105 * @vfh: where to store the returned VFH
5106 * @sleep_ok: if true we may sleep while awaiting command completion
5108 * Reads the VF RSS Configuration Table at the specified index and returns
5109 * the (VFL, VFH) values found there.
5111 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5112 u32 *vfl, u32 *vfh, bool sleep_ok)
5114 u32 vrt, mask, data;
5116 if (chip_id(adapter) <= CHELSIO_T5) {
5117 mask = V_VFWRADDR(M_VFWRADDR);
5118 data = V_VFWRADDR(index);
5120 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5121 data = V_T6_VFWRADDR(index);
5124 * Request that the index'th VF Table values be read into VFL/VFH.
5126 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5127 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5128 vrt |= data | F_VFRDEN;
5129 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5132 * Grab the VFL/VFH values ...
5134 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5135 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5139 * t4_write_rss_vf_config - write VF RSS Configuration Table
5141 * @adapter: the adapter
5142 * @index: the entry in the VF RSS table to write
5143 * @vfl: the VFL to store
5144 * @vfh: the VFH to store
5146 * Writes the VF RSS Configuration Table at the specified index with the
5147 * specified (VFL, VFH) values.
5149 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5150 u32 vfl, u32 vfh, bool sleep_ok)
5152 u32 vrt, mask, data;
5154 if (chip_id(adapter) <= CHELSIO_T5) {
5155 mask = V_VFWRADDR(M_VFWRADDR);
5156 data = V_VFWRADDR(index);
5158 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5159 data = V_T6_VFWRADDR(index);
5163 * Load up VFL/VFH with the values to be written ...
5165 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5166 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5169 * Write the VFL/VFH into the VF Table at index'th location.
5171 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5172 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5173 vrt |= data | F_VFRDEN;
5174 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5178 * t4_read_rss_pf_map - read PF RSS Map
5179 * @adapter: the adapter
5180 * @sleep_ok: if true we may sleep while awaiting command completion
5182 * Reads the PF RSS Map register and returns its value.
5184 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5188 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5194 * t4_write_rss_pf_map - write PF RSS Map
5195 * @adapter: the adapter
5196 * @pfmap: PF RSS Map value
5198 * Writes the specified value to the PF RSS Map register.
5200 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5202 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5206 * t4_read_rss_pf_mask - read PF RSS Mask
5207 * @adapter: the adapter
5208 * @sleep_ok: if true we may sleep while awaiting command completion
5210 * Reads the PF RSS Mask register and returns its value.
5212 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5216 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5222 * t4_write_rss_pf_mask - write PF RSS Mask
5223 * @adapter: the adapter
5224 * @pfmask: PF RSS Mask value
5226 * Writes the specified value to the PF RSS Mask register.
5228 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
5230 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5234 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5235 * @adap: the adapter
5236 * @v4: holds the TCP/IP counter values
5237 * @v6: holds the TCP/IPv6 counter values
5238 * @sleep_ok: if true we may sleep while awaiting command completion
5240 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5241 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5243 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5244 struct tp_tcp_stats *v6, bool sleep_ok)
5246 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5248 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5249 #define STAT(x) val[STAT_IDX(x)]
5250 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5253 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5254 A_TP_MIB_TCP_OUT_RST, sleep_ok);
5255 v4->tcp_out_rsts = STAT(OUT_RST);
5256 v4->tcp_in_segs = STAT64(IN_SEG);
5257 v4->tcp_out_segs = STAT64(OUT_SEG);
5258 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5261 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5262 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5263 v6->tcp_out_rsts = STAT(OUT_RST);
5264 v6->tcp_in_segs = STAT64(IN_SEG);
5265 v6->tcp_out_segs = STAT64(OUT_SEG);
5266 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5274 * t4_tp_get_err_stats - read TP's error MIB counters
5275 * @adap: the adapter
5276 * @st: holds the counter values
5277 * @sleep_ok: if true we may sleep while awaiting command completion
5279 * Returns the values of TP's error counters.
5281 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5284 int nchan = adap->chip_params->nchan;
5286 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5289 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5292 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5295 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5296 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5298 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5299 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5301 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5304 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5305 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5307 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5308 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
5310 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
5315 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5316 * @adap: the adapter
5317 * @st: holds the counter values
5319 * Returns the values of TP's proxy counters.
5321 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
5324 int nchan = adap->chip_params->nchan;
5326 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
5330 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5331 * @adap: the adapter
5332 * @st: holds the counter values
5333 * @sleep_ok: if true we may sleep while awaiting command completion
5335 * Returns the values of TP's CPL counters.
5337 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5340 int nchan = adap->chip_params->nchan;
5342 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
5344 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
5348 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5349 * @adap: the adapter
5350 * @st: holds the counter values
5352 * Returns the values of TP's RDMA counters.
5354 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5357 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
5362 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5363 * @adap: the adapter
5364 * @idx: the port index
5365 * @st: holds the counter values
5366 * @sleep_ok: if true we may sleep while awaiting command completion
5368 * Returns the values of TP's FCoE counters for the selected port.
5370 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5371 struct tp_fcoe_stats *st, bool sleep_ok)
5375 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
5378 t4_tp_mib_read(adap, &st->frames_drop, 1,
5379 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
5381 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
5384 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5388 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5389 * @adap: the adapter
5390 * @st: holds the counter values
5391 * @sleep_ok: if true we may sleep while awaiting command completion
5393 * Returns the values of TP's counters for non-TCP directly-placed packets.
5395 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5400 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
5402 st->frames = val[0];
5404 st->octets = ((u64)val[2] << 32) | val[3];
5408 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5409 * @adap: the adapter
5410 * @mtus: where to store the MTU values
5411 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5413 * Reads the HW path MTU table.
5415 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5420 for (i = 0; i < NMTUS; ++i) {
5421 t4_write_reg(adap, A_TP_MTU_TABLE,
5422 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5423 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5424 mtus[i] = G_MTUVALUE(v);
5426 mtu_log[i] = G_MTUWIDTH(v);
5431 * t4_read_cong_tbl - reads the congestion control table
5432 * @adap: the adapter
5433 * @incr: where to store the alpha values
5435 * Reads the additive increments programmed into the HW congestion
5438 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5440 unsigned int mtu, w;
5442 for (mtu = 0; mtu < NMTUS; ++mtu)
5443 for (w = 0; w < NCCTRL_WIN; ++w) {
5444 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5445 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5446 incr[mtu][w] = (u16)t4_read_reg(adap,
5447 A_TP_CCTRL_TABLE) & 0x1fff;
5452 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5453 * @adap: the adapter
5454 * @addr: the indirect TP register address
5455 * @mask: specifies the field within the register to modify
5456 * @val: new value for the field
5458 * Sets a field of an indirect TP register to the given value.
5460 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5461 unsigned int mask, unsigned int val)
5463 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5464 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5465 t4_write_reg(adap, A_TP_PIO_DATA, val);
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Additive increments (alpha), per congestion-window bucket. */
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	/* Multiplicative-decrease shifts (beta), per bucket. */
	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
5512 /* The minimum additive increment value for the congestion control table */
5513 #define CC_MIN_INCR 2U
5516 * t4_load_mtus - write the MTU and congestion control HW tables
5517 * @adap: the adapter
5518 * @mtus: the values for the MTU table
5519 * @alpha: the values for the congestion control alpha parameter
5520 * @beta: the values for the congestion control beta parameter
5522 * Write the HW MTU table with the supplied MTUs and the high-speed
5523 * congestion control table with the supplied alpha, beta, and MTUs.
5524 * We write the two tables together because the additive increments
5525 * depend on the MTUs.
5527 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5528 const unsigned short *alpha, const unsigned short *beta)
5530 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5531 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5532 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5533 28672, 40960, 57344, 81920, 114688, 163840, 229376
5538 for (i = 0; i < NMTUS; ++i) {
5539 unsigned int mtu = mtus[i];
5540 unsigned int log2 = fls(mtu);
5542 if (!(mtu & ((1 << log2) >> 2))) /* round */
5544 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5545 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5547 for (w = 0; w < NCCTRL_WIN; ++w) {
5550 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5553 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5554 (w << 16) | (beta[w] << 13) | inc);
5560 * t4_set_pace_tbl - set the pace table
5561 * @adap: the adapter
5562 * @pace_vals: the pace values in microseconds
5563 * @start: index of the first entry in the HW pace table to set
5564 * @n: how many entries to set
5566 * Sets (a subset of the) HW pace table.
5568 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5569 unsigned int start, unsigned int n)
5571 unsigned int vals[NTX_SCHED], i;
5572 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5577 /* convert values from us to dack ticks, rounding to closest value */
5578 for (i = 0; i < n; i++, pace_vals++) {
5579 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5580 if (vals[i] > 0x7ff)
5582 if (*pace_vals && vals[i] == 0)
5585 for (i = 0; i < n; i++, start++)
5586 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5591 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5592 * @adap: the adapter
5593 * @kbps: target rate in Kbps
5594 * @sched: the scheduler index
5596 * Configure a Tx HW scheduler for the target rate.
5598 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5600 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5601 unsigned int clk = adap->params.vpd.cclk * 1000;
5602 unsigned int selected_cpt = 0, selected_bpt = 0;
5605 kbps *= 125; /* -> bytes */
5606 for (cpt = 1; cpt <= 255; cpt++) {
5608 bpt = (kbps + tps / 2) / tps;
5609 if (bpt > 0 && bpt <= 255) {
5611 delta = v >= kbps ? v - kbps : kbps - v;
5612 if (delta < mindelta) {
5617 } else if (selected_cpt)
5623 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5624 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5625 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5627 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5629 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5630 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5635 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5636 * @adap: the adapter
5637 * @sched: the scheduler index
5638 * @ipg: the interpacket delay in tenths of nanoseconds
5640 * Set the interpacket delay for a HW packet rate scheduler.
5642 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5644 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5646 /* convert ipg to nearest number of core clocks */
5647 ipg *= core_ticks_per_usec(adap);
5648 ipg = (ipg + 5000) / 10000;
5649 if (ipg > M_TXTIMERSEPQ0)
5652 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5653 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5655 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5657 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5658 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5659 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5664 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5665 * clocks. The formula is
5667 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5669 * which is equivalent to
5671 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5673 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5675 u64 v = bytes256 * adap->params.vpd.cclk;
5677 return v * 62 + v / 2;
5681 * t4_get_chan_txrate - get the current per channel Tx rates
5682 * @adap: the adapter
5683 * @nic_rate: rates for NIC traffic
5684 * @ofld_rate: rates for offloaded traffic
5686 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5689 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5693 v = t4_read_reg(adap, A_TP_TX_TRATE);
5694 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5695 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5696 if (adap->chip_params->nchan > 2) {
5697 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5698 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5701 v = t4_read_reg(adap, A_TP_TX_ORATE);
5702 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5703 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5704 if (adap->chip_params->nchan > 2) {
5705 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5706 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5711 * t4_set_trace_filter - configure one of the tracing filters
5712 * @adap: the adapter
5713 * @tp: the desired trace filter parameters
5714 * @idx: which filter to configure
5715 * @enable: whether to enable or disable the filter
5717 * Configures one of the tracing filters available in HW. If @tp is %NULL
5718 * it indicates that the filter is already written in the register and it
5719 * just needs to be enabled or disabled.
5721 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5722 int idx, int enable)
5724 int i, ofst = idx * 4;
5725 u32 data_reg, mask_reg, cfg;
5726 u32 multitrc = F_TRCMULTIFILTER;
5727 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5729 if (idx < 0 || idx >= NTRACE)
5732 if (tp == NULL || !enable) {
5733 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5739 * TODO - After T4 data book is updated, specify the exact
5742 * See T4 data book - MPS section for a complete description
5743 * of the below if..else handling of A_MPS_TRC_CFG register
5746 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5747 if (cfg & F_TRCMULTIFILTER) {
5749 * If multiple tracers are enabled, then maximum
5750 * capture size is 2.5KB (FIFO size of a single channel)
5751 * minus 2 flits for CPL_TRACE_PKT header.
5753 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5757 * If multiple tracers are disabled, to avoid deadlocks
5758 * maximum packet capture size of 9600 bytes is recommended.
5759 * Also in this mode, only trace0 can be enabled and running.
5762 if (tp->snap_len > 9600 || idx)
5766 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5767 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5768 tp->min_len > M_TFMINPKTSIZE)
5771 /* stop the tracer we'll be changing */
5772 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5774 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5775 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5776 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5778 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5779 t4_write_reg(adap, data_reg, tp->data[i]);
5780 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5782 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5783 V_TFCAPTUREMAX(tp->snap_len) |
5784 V_TFMINPKTSIZE(tp->min_len));
5785 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5786 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5788 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5789 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5795 * t4_get_trace_filter - query one of the tracing filters
5796 * @adap: the adapter
5797 * @tp: the current trace filter parameters
5798 * @idx: which trace filter to query
5799 * @enabled: non-zero if the filter is enabled
5801 * Returns the current settings of one of the HW tracing filters.
5803 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5807 int i, ofst = idx * 4;
5808 u32 data_reg, mask_reg;
5810 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5811 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5814 *enabled = !!(ctla & F_TFEN);
5815 tp->port = G_TFPORT(ctla);
5816 tp->invert = !!(ctla & F_TFINVERTMATCH);
5818 *enabled = !!(ctla & F_T5_TFEN);
5819 tp->port = G_T5_TFPORT(ctla);
5820 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5822 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5823 tp->min_len = G_TFMINPKTSIZE(ctlb);
5824 tp->skip_ofst = G_TFOFFSET(ctla);
5825 tp->skip_len = G_TFLENGTH(ctla);
5827 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5828 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5829 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5831 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5832 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5833 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5838 * t4_pmtx_get_stats - returns the HW stats from PMTX
5839 * @adap: the adapter
5840 * @cnt: where to store the count statistics
5841 * @cycles: where to store the cycle statistics
5843 * Returns performance statistics from PMTX.
5845 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5850 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5851 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5852 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5854 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5856 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5857 A_PM_TX_DBG_DATA, data, 2,
5858 A_PM_TX_DBG_STAT_MSB);
5859 cycles[i] = (((u64)data[0] << 32) | data[1]);
5865 * t4_pmrx_get_stats - returns the HW stats from PMRX
5866 * @adap: the adapter
5867 * @cnt: where to store the count statistics
5868 * @cycles: where to store the cycle statistics
5870 * Returns performance statistics from PMRX.
5872 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5877 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5878 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5879 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5881 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5883 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5884 A_PM_RX_DBG_DATA, data, 2,
5885 A_PM_RX_DBG_STAT_MSB);
5886 cycles[i] = (((u64)data[0] << 32) | data[1]);
5892 * t4_get_mps_bg_map - return the buffer groups associated with a port
5893 * @adap: the adapter
5894 * @idx: the port index
5896 * Returns a bitmap indicating which MPS buffer groups are associated
5897 * with the given port. Bit i is set if buffer group i is used by the
5900 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5902 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5905 return idx == 0 ? 0xf : 0;
5906 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5907 return idx < 2 ? (3 << (2 * idx)) : 0;
5912 * t4_get_port_type_description - return Port Type string description
5913 * @port_type: firmware Port Type enumeration
5915 const char *t4_get_port_type_description(enum fw_port_type port_type)
5917 static const char *const port_type_description[] = {
5942 if (port_type < ARRAY_SIZE(port_type_description))
5943 return port_type_description[port_type];
5948 * t4_get_port_stats_offset - collect port stats relative to a previous
5950 * @adap: The adapter
5952 * @stats: Current stats to fill
5953 * @offset: Previous stats snapshot
5955 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5956 struct port_stats *stats,
5957 struct port_stats *offset)
5962 t4_get_port_stats(adap, idx, stats);
5963 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5964 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5970 * t4_get_port_stats - collect port statistics
5971 * @adap: the adapter
5972 * @idx: the port index
5973 * @p: the stats structure to fill
5975 * Collect statistics related to the given port from HW.
5977 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5979 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5980 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5982 #define GET_STAT(name) \
5983 t4_read_reg64(adap, \
5984 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5985 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5986 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5988 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5989 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5990 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5991 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5992 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5993 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5994 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5995 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5996 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5997 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5998 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5999 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6000 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6001 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6002 p->tx_drop = GET_STAT(TX_PORT_DROP);
6003 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6004 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6005 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6006 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6007 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6008 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6009 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6010 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6012 if (chip_id(adap) >= CHELSIO_T5) {
6013 if (stat_ctl & F_COUNTPAUSESTATTX) {
6014 p->tx_frames -= p->tx_pause;
6015 p->tx_octets -= p->tx_pause * 64;
6017 if (stat_ctl & F_COUNTPAUSEMCTX)
6018 p->tx_mcast_frames -= p->tx_pause;
6021 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6022 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6023 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6024 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6025 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6026 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6027 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6028 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6029 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6030 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6031 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6032 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6033 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6034 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6035 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6036 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6037 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6038 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6039 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6040 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6041 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6042 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6043 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6044 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6045 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6046 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6047 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6049 if (chip_id(adap) >= CHELSIO_T5) {
6050 if (stat_ctl & F_COUNTPAUSESTATRX) {
6051 p->rx_frames -= p->rx_pause;
6052 p->rx_octets -= p->rx_pause * 64;
6054 if (stat_ctl & F_COUNTPAUSEMCRX)
6055 p->rx_mcast_frames -= p->rx_pause;
6058 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6059 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6060 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6061 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6062 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6063 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6064 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6065 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6072 * t4_get_lb_stats - collect loopback port statistics
6073 * @adap: the adapter
6074 * @idx: the loopback port index
6075 * @p: the stats structure to fill
6077 * Return HW statistics for the given loopback port.
6079 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6081 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6083 #define GET_STAT(name) \
6084 t4_read_reg64(adap, \
6086 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6087 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6088 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6090 p->octets = GET_STAT(BYTES);
6091 p->frames = GET_STAT(FRAMES);
6092 p->bcast_frames = GET_STAT(BCAST);
6093 p->mcast_frames = GET_STAT(MCAST);
6094 p->ucast_frames = GET_STAT(UCAST);
6095 p->error_frames = GET_STAT(ERROR);
6097 p->frames_64 = GET_STAT(64B);
6098 p->frames_65_127 = GET_STAT(65B_127B);
6099 p->frames_128_255 = GET_STAT(128B_255B);
6100 p->frames_256_511 = GET_STAT(256B_511B);
6101 p->frames_512_1023 = GET_STAT(512B_1023B);
6102 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6103 p->frames_1519_max = GET_STAT(1519B_MAX);
6104 p->drop = GET_STAT(DROP_FRAMES);
6106 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6107 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6108 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6109 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6110 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6111 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6112 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6113 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6120 * t4_wol_magic_enable - enable/disable magic packet WoL
6121 * @adap: the adapter
6122 * @port: the physical port index
6123 * @addr: MAC address expected in magic packets, %NULL to disable
6125 * Enables/disables magic packet wake-on-LAN for the selected port.
6127 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6130 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6133 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6134 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6135 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6137 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6138 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6139 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6143 t4_write_reg(adap, mag_id_reg_l,
6144 (addr[2] << 24) | (addr[3] << 16) |
6145 (addr[4] << 8) | addr[5]);
6146 t4_write_reg(adap, mag_id_reg_h,
6147 (addr[0] << 8) | addr[1]);
6149 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6150 V_MAGICEN(addr != NULL));
6154 * t4_wol_pat_enable - enable/disable pattern-based WoL
6155 * @adap: the adapter
6156 * @port: the physical port index
6157 * @map: bitmap of which HW pattern filters to set
6158 * @mask0: byte mask for bytes 0-63 of a packet
6159 * @mask1: byte mask for bytes 64-127 of a packet
6160 * @crc: Ethernet CRC for selected bytes
6161 * @enable: enable/disable switch
6163 * Sets the pattern filters indicated in @map to mask out the bytes
6164 * specified in @mask0/@mask1 in received packets and compare the CRC of
6165 * the resulting packet against @crc. If @enable is %true pattern-based
6166 * WoL is enabled, otherwise disabled.
6168 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6169 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6175 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6177 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6180 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6186 #define EPIO_REG(name) \
6187 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6188 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6190 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6191 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6192 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6194 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6198 /* write byte masks */
6199 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6200 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6201 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6202 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6206 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6207 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6208 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6209 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6214 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6218 /* t4_mk_filtdelwr - create a delete filter WR
6219 * @ftid: the filter ID
6220 * @wr: the filter work request to populate
6221 * @qid: ingress queue to receive the delete notification
6223 * Creates a filter work request to delete the supplied filter. If @qid is
6224 * negative the delete notification is suppressed.
6226 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
/* Build a FW_FILTER_WR work request that deletes filter @ftid.
 * NOREPLY is set when @qid is negative, suppressing the delete
 * notification; otherwise the reply is steered to ingress queue @qid
 * (presumably under a qid >= 0 guard elided from this view). */
6228 memset(wr, 0, sizeof(*wr));
6229 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6230 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6231 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6232 V_FW_FILTER_WR_NOREPLY(qid < 0));
6233 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6235 wr->rx_chan_rx_rpl_iq =
6236 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * INIT_CMD() fills in the common header of a firmware command structure:
 * the FW_<cmd>_CMD opcode, the REQUEST flag, the READ or WRITE flag, and
 * the command length in 16-byte units derived from sizeof(var).
 * (No comments may be placed inside the macro body: every line there
 * must end in a backslash continuation.)
 */
6239 #define INIT_CMD(var, cmd, rd_wr) do { \
6240 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
6241 F_FW_CMD_REQUEST | \
6242 F_FW_CMD_##rd_wr); \
6243 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6246 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
/* Write a 32-bit value into the firmware's address space using an
 * FW_LDST command over mailbox @mbox; returns the mailbox status. */
6250 struct fw_ldst_cmd c;
6252 memset(&c, 0, sizeof(c));
6253 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6254 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6258 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6259 c.u.addrval.addr = cpu_to_be32(addr);
6260 c.u.addrval.val = cpu_to_be32(val);
/* No reply payload is needed, so the response buffer is NULL. */
6262 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6266 * t4_mdio_rd - read a PHY register through MDIO
6267 * @adap: the adapter
6268 * @mbox: mailbox to use for the FW command
6269 * @phy_addr: the PHY address
6270 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6271 * @reg: the register to read
6272 * @valp: where to store the value
6274 * Issues a FW command through the given mailbox to read a PHY register.
6276 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6277 unsigned int mmd, unsigned int reg, unsigned int *valp)
6281 struct fw_ldst_cmd c;
/* Build an FW_LDST read command targeting the MDIO address space. */
6283 memset(&c, 0, sizeof(c));
6284 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6285 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6286 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6288 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* PHY address and MMD (0 for clause-22 PHYs) share one 16-bit field. */
6289 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6290 V_FW_LDST_CMD_MMD(mmd));
6291 c.u.mdio.raddr = cpu_to_be16(reg);
/* Reply is written back into 'c'; extract the 16-bit register value. */
6293 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6295 *valp = be16_to_cpu(c.u.mdio.rval);
6300 * t4_mdio_wr - write a PHY register through MDIO
6301 * @adap: the adapter
6302 * @mbox: mailbox to use for the FW command
6303 * @phy_addr: the PHY address
6304 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6305 * @reg: the register to write
6306 * @valp: value to write
6308 * Issues a FW command through the given mailbox to write a PHY register.
6310 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6311 unsigned int mmd, unsigned int reg, unsigned int val)
6314 struct fw_ldst_cmd c;
/* Build an FW_LDST write command targeting the MDIO address space. */
6316 memset(&c, 0, sizeof(c));
6317 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6318 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6319 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6321 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* PHY address and MMD (0 for clause-22 PHYs) share one 16-bit field. */
6322 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6323 V_FW_LDST_CMD_MMD(mmd));
6324 c.u.mdio.raddr = cpu_to_be16(reg);
6325 c.u.mdio.rval = cpu_to_be16(val);
/* Write-only command: no reply payload is needed. */
6327 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6332 * t4_sge_decode_idma_state - decode the idma state
6333 * @adap: the adapter
6334 * @state: the state idma is stuck in
6336 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* Human-readable names for the SGE IDMA state machine on T4 chips;
 * the array index is the raw hardware state number. */
6338 static const char * const t4_decode[] = {
6340 "IDMA_PUSH_MORE_CPL_FIFO",
6341 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6343 "IDMA_PHYSADDR_SEND_PCIEHDR",
6344 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6345 "IDMA_PHYSADDR_SEND_PAYLOAD",
6346 "IDMA_SEND_FIFO_TO_IMSG",
6347 "IDMA_FL_REQ_DATA_FL_PREP",
6348 "IDMA_FL_REQ_DATA_FL",
6350 "IDMA_FL_H_REQ_HEADER_FL",
6351 "IDMA_FL_H_SEND_PCIEHDR",
6352 "IDMA_FL_H_PUSH_CPL_FIFO",
6353 "IDMA_FL_H_SEND_CPL",
6354 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6355 "IDMA_FL_H_SEND_IP_HDR",
6356 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6357 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6358 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6359 "IDMA_FL_D_SEND_PCIEHDR",
6360 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6361 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6362 "IDMA_FL_SEND_PCIEHDR",
6363 "IDMA_FL_PUSH_CPL_FIFO",
6365 "IDMA_FL_SEND_PAYLOAD_FIRST",
6366 "IDMA_FL_SEND_PAYLOAD",
6367 "IDMA_FL_REQ_NEXT_DATA_FL",
6368 "IDMA_FL_SEND_NEXT_PCIEHDR",
6369 "IDMA_FL_SEND_PADDING",
6370 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6371 "IDMA_FL_SEND_FIFO_TO_IMSG",
6372 "IDMA_FL_REQ_DATAFL_DONE",
6373 "IDMA_FL_REQ_HEADERFL_DONE",
/* T5 state names; the T5 state machine differs from T4 (e.g. it adds
 * SGEFLRFLUSH and DROP states). */
6375 static const char * const t5_decode[] = {
6378 "IDMA_PUSH_MORE_CPL_FIFO",
6379 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6380 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6381 "IDMA_PHYSADDR_SEND_PCIEHDR",
6382 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6383 "IDMA_PHYSADDR_SEND_PAYLOAD",
6384 "IDMA_SEND_FIFO_TO_IMSG",
6385 "IDMA_FL_REQ_DATA_FL",
6387 "IDMA_FL_DROP_SEND_INC",
6388 "IDMA_FL_H_REQ_HEADER_FL",
6389 "IDMA_FL_H_SEND_PCIEHDR",
6390 "IDMA_FL_H_PUSH_CPL_FIFO",
6391 "IDMA_FL_H_SEND_CPL",
6392 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6393 "IDMA_FL_H_SEND_IP_HDR",
6394 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6395 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6396 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6397 "IDMA_FL_D_SEND_PCIEHDR",
6398 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6399 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6400 "IDMA_FL_SEND_PCIEHDR",
6401 "IDMA_FL_PUSH_CPL_FIFO",
6403 "IDMA_FL_SEND_PAYLOAD_FIRST",
6404 "IDMA_FL_SEND_PAYLOAD",
6405 "IDMA_FL_REQ_NEXT_DATA_FL",
6406 "IDMA_FL_SEND_NEXT_PCIEHDR",
6407 "IDMA_FL_SEND_PADDING",
6408 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* T6 state names; similar to T5 but without the SEND_FIFO_TO_IMSG state. */
6410 static const char * const t6_decode[] = {
6412 "IDMA_PUSH_MORE_CPL_FIFO",
6413 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6414 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6415 "IDMA_PHYSADDR_SEND_PCIEHDR",
6416 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6417 "IDMA_PHYSADDR_SEND_PAYLOAD",
6418 "IDMA_FL_REQ_DATA_FL",
6420 "IDMA_FL_DROP_SEND_INC",
6421 "IDMA_FL_H_REQ_HEADER_FL",
6422 "IDMA_FL_H_SEND_PCIEHDR",
6423 "IDMA_FL_H_PUSH_CPL_FIFO",
6424 "IDMA_FL_H_SEND_CPL",
6425 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6426 "IDMA_FL_H_SEND_IP_HDR",
6427 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6428 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6429 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6430 "IDMA_FL_D_SEND_PCIEHDR",
6431 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6432 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6433 "IDMA_FL_SEND_PCIEHDR",
6434 "IDMA_FL_PUSH_CPL_FIFO",
6436 "IDMA_FL_SEND_PAYLOAD_FIRST",
6437 "IDMA_FL_SEND_PAYLOAD",
6438 "IDMA_FL_REQ_NEXT_DATA_FL",
6439 "IDMA_FL_SEND_NEXT_PCIEHDR",
6440 "IDMA_FL_SEND_PADDING",
6441 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers whose raw values are dumped alongside the
 * decoded state to aid post-mortem analysis. */
6443 static const u32 sge_regs[] = {
6444 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6445 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6446 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6448 const char * const *sge_idma_decode;
6449 int sge_idma_decode_nstates;
6451 unsigned int chip_version = chip_id(adapter);
6453 /* Select the right set of decode strings to dump depending on the
6454 * adapter chip type.
6456 switch (chip_version) {
6458 sge_idma_decode = (const char * const *)t4_decode;
6459 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6463 sge_idma_decode = (const char * const *)t5_decode;
6464 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6468 sge_idma_decode = (const char * const *)t6_decode;
6469 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
/* Unknown chip: log an error (the early-return, if any, is elided
 * from this view). */
6473 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
/* Print the symbolic state name when in range, otherwise the raw number. */
6477 if (state < sge_idma_decode_nstates)
6478 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6480 CH_WARN(adapter, "idma state %d unknown\n", state);
/* Dump the SGE debug registers for additional context. */
6482 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6483 CH_WARN(adapter, "SGE register %#x value %#x\n",
6484 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6488 * t4_sge_ctxt_flush - flush the SGE context cache
6489 * @adap: the adapter
6490 * @mbox: mailbox to use for the FW command
6492 * Issues a FW command through the given mailbox to flush the
6493 * SGE context cache.
6495 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6499 struct fw_ldst_cmd c;
/* Build an FW_LDST command against the SGE egress-context address
 * space with the CTXTFLUSH flag set to flush the context cache. */
6501 memset(&c, 0, sizeof(c));
6502 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6503 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6504 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6506 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6507 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6509 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6514 * t4_fw_hello - establish communication with FW
6515 * @adap: the adapter
6516 * @mbox: mailbox to use for the FW command
6517 * @evt_mbox: mailbox to receive async FW events
6518 * @master: specifies the caller's willingness to be the device master
6519 * @state: returns the current device state (if non-NULL)
6521 * Issues a command to establish communication with FW. Returns either
6522 * an error (negative integer) or the mailbox of the Master PF.
6524 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6525 enum dev_master master, enum dev_state *state)
6528 struct fw_hello_cmd c;
6530 unsigned int master_mbox;
6531 int retries = FW_CMD_HELLO_RETRIES;
/* Build the HELLO command, encoding our mastership preference:
 * MASTERDIS if we can't be master, MASTERFORCE (plus our own mailbox as
 * the proposed master) if we must be, and the "don't care" MBMASTER
 * sentinel otherwise.  CLEARINIT asks FW to reset its init state. */
6534 memset(&c, 0, sizeof(c));
6535 INIT_CMD(c, HELLO, WRITE);
6536 c.err_to_clearinit = cpu_to_be32(
6537 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6538 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6539 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6540 mbox : M_FW_HELLO_CMD_MBMASTER) |
6541 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6542 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6543 F_FW_HELLO_CMD_CLEARINIT);
6546 * Issue the HELLO command to the firmware. If it's not successful
6547 * but indicates that we got a "busy" or "timeout" condition, retry
6548 * the HELLO until we exhaust our retry limit. If we do exceed our
6549 * retry limit, check to see if the firmware left us any error
6550 * information and report that if so ...
6552 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6553 if (ret != FW_SUCCESS) {
6554 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6556 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6557 t4_report_fw_error(adap);
/* HELLO succeeded: extract the master mailbox and, if the caller
 * asked for it, the current device state from the reply. */
6561 v = be32_to_cpu(c.err_to_clearinit);
6562 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6564 if (v & F_FW_HELLO_CMD_ERR)
6565 *state = DEV_STATE_ERR;
6566 else if (v & F_FW_HELLO_CMD_INIT)
6567 *state = DEV_STATE_INIT;
6569 *state = DEV_STATE_UNINIT;
6573 * If we're not the Master PF then we need to wait around for the
6574 * Master PF Driver to finish setting up the adapter.
6576 * Note that we also do this wait if we're a non-Master-capable PF and
6577 * there is no current Master PF; a Master PF may show up momentarily
6578 * and we wouldn't want to fail pointlessly. (This can happen when an
6579 * OS loads lots of different drivers rapidly at the same time). In
6580 * this case, the Master PF returned by the firmware will be
6581 * M_PCIE_FW_MASTER so the test below will work ...
6583 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6584 master_mbox != mbox) {
6585 int waiting = FW_CMD_HELLO_TIMEOUT;
6588 * Wait for the firmware to either indicate an error or
6589 * initialized state. If we see either of these we bail out
6590 * and report the issue to the caller. If we exhaust the
6591 * "hello timeout" and we haven't exhausted our retries, try
6592 * again. Otherwise bail with a timeout error.
6601 * If neither Error nor Initialized are indicated
6602 * by the firmware keep waiting till we exhaust our
6603 * timeout ... and then retry if we haven't exhausted
6606 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6607 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6618 * We either have an Error or Initialized condition
6619 * report errors preferentially.
6622 if (pcie_fw & F_PCIE_FW_ERR)
6623 *state = DEV_STATE_ERR;
6624 else if (pcie_fw & F_PCIE_FW_INIT)
6625 *state = DEV_STATE_INIT;
6629 * If we arrived before a Master PF was selected and
6630 * there's not a valid Master PF, grab its identity
6633 if (master_mbox == M_PCIE_FW_MASTER &&
6634 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6635 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6644 * t4_fw_bye - end communication with FW
6645 * @adap: the adapter
6646 * @mbox: mailbox to use for the FW command
6648 * Issues a command to terminate communication with FW.
6650 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6652 struct fw_bye_cmd c;
/* Send a write-only BYE command; no reply payload is expected. */
6654 memset(&c, 0, sizeof(c));
6655 INIT_CMD(c, BYE, WRITE);
6656 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6660 * t4_fw_reset - issue a reset to FW
6661 * @adap: the adapter
6662 * @mbox: mailbox to use for the FW command
6663 * @reset: specifies the type of reset to perform
6665 * Issues a reset command of the specified type to FW.
6667 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6669 struct fw_reset_cmd c;
/* Send a RESET command carrying the caller-specified reset type
 * (e.g. F_PIORST | F_PIORSTMODE) in the 'val' field. */
6671 memset(&c, 0, sizeof(c));
6672 INIT_CMD(c, RESET, WRITE);
6673 c.val = cpu_to_be32(reset);
6674 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6678 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6679 * @adap: the adapter
6680 * @mbox: mailbox to use for the FW RESET command (if desired)
6681 * @force: force uP into RESET even if FW RESET command fails
6683 * Issues a RESET command to firmware (if desired) with a HALT indication
6684 * and then puts the microprocessor into RESET state. The RESET command
6685 * will only be issued if a legitimate mailbox is provided (mbox <=
6686 * M_PCIE_FW_MASTER).
6688 * This is generally used in order for the host to safely manipulate the
6689 * adapter without fear of conflicting with whatever the firmware might
6690 * be doing. The only way out of this state is to RESTART the firmware
6693 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6698 * If a legitimate mailbox is provided, issue a RESET command
6699 * with a HALT indication.
6701 if (mbox <= M_PCIE_FW_MASTER) {
6702 struct fw_reset_cmd c;
6704 memset(&c, 0, sizeof(c));
6705 INIT_CMD(c, RESET, WRITE);
6706 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6707 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6708 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6712 * Normally we won't complete the operation if the firmware RESET
6713 * command fails but if our caller insists we'll go ahead and put the
6714 * uP into RESET. This can be useful if the firmware is hung or even
6715 * missing ... We'll have to take the risk of putting the uP into
6716 * RESET without the cooperation of firmware in that case.
6718 * We also force the firmware's HALT flag to be on in case we bypassed
6719 * the firmware RESET command above or we're dealing with old firmware
6720 * which doesn't have the HALT capability. This will serve as a flag
6721 * for the incoming firmware to know that it's coming out of a HALT
6722 * rather than a RESET ... if it's new enough to understand that ...
6724 if (ret == 0 || force) {
/* Put the uP into RESET and latch the PCIE_FW HALT flag. */
6725 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6726 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6731 * And we always return the result of the firmware RESET command
6732 * even when we force the uP into RESET ...
6738 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6739 * @adap: the adapter
6740 * @reset: if we want to do a RESET to restart things
6742 * Restart firmware previously halted by t4_fw_halt(). On successful
6743 * return the previous PF Master remains as the new PF Master and there
6744 * is no need to issue a new HELLO command, etc.
6746 * We do this in two ways:
6748 * 1. If we're dealing with newer firmware we'll simply want to take
6749 * the chip's microprocessor out of RESET. This will cause the
6750 * firmware to start up from its start vector. And then we'll loop
6751 * until the firmware indicates it's started again (PCIE_FW.HALT
6752 * reset to 0) or we timeout.
6754 * 2. If we're dealing with older firmware then we'll need to RESET
6755 * the chip since older firmware won't recognize the PCIE_FW.HALT
6756 * flag and automatically RESET itself on startup.
6758 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6762 * Since we're directing the RESET instead of the firmware
6763 * doing it automatically, we need to clear the PCIE_FW.HALT
6766 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6769 * If we've been given a valid mailbox, first try to get the
6770 * firmware to do the RESET. If that works, great and we can
6771 * return success. Otherwise, if we haven't been given a
6772 * valid mailbox or the RESET command failed, fall back to
6773 * hitting the chip with a hammer.
6775 if (mbox <= M_PCIE_FW_MASTER) {
/* Take the uP out of RESET first so it can process the command. */
6776 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6778 if (t4_fw_reset(adap, mbox,
6779 F_PIORST | F_PIORSTMODE) == 0)
/* Fallback: global PIO reset via PL_RST ("the hammer"). */
6783 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
/* Release the uP from RESET and poll PCIE_FW.HALT until the firmware
 * reports it has restarted, or we run out of time. */
6788 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6789 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6790 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6801 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6802 * @adap: the adapter
6803 * @mbox: mailbox to use for the FW RESET command (if desired)
6804 * @fw_data: the firmware image to write
6806 * @force: force upgrade even if firmware doesn't cooperate
6808 * Perform all of the steps necessary for upgrading an adapter's
6809 * firmware image. Normally this requires the cooperation of the
6810 * existing firmware in order to halt all existing activities
6811 * but if an invalid mailbox token is passed in we skip that step
6812 * (though we'll still put the adapter microprocessor into RESET in
6815 * On successful return the new firmware will have been loaded and
6816 * the adapter will have been fully RESET losing all previous setup
6817 * state. On unsuccessful return the adapter may be completely hosed ...
6818 * positive errno indicates that the adapter is ~probably~ intact, a
6819 * negative errno indicates that things are looking bad ...
6821 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6822 const u8 *fw_data, unsigned int size, int force)
6824 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* A bootstrap image (magic FW_HDR_MAGIC_BOOTSTRAP) is loaded but the
 * firmware is not restarted afterwards. */
6825 unsigned int bootstrap =
6826 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
/* Refuse images built for a different chip. */
6829 if (!t4_fw_matches_chip(adap, fw_hdr))
/* Halt the running firmware; failure aborts unless @force is set. */
6833 ret = t4_fw_halt(adap, mbox, force);
6834 if (ret < 0 && !force)
6838 ret = t4_load_fw(adap, fw_data, size);
6839 if (ret < 0 || bootstrap)
6843 * Older versions of the firmware don't understand the new
6844 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6845 * restart. So for newly loaded older firmware we'll have to do the
6846 * RESET for it so it starts up on a clean slate. We can tell if
6847 * the newly loaded firmware will handle this right by checking
6848 * its header flags to see if it advertises the capability.
6850 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6851 return t4_fw_restart(adap, mbox, reset);
6855 * t4_fw_initialize - ask FW to initialize the device
6856 * @adap: the adapter
6857 * @mbox: mailbox to use for the FW command
6859 * Issues a command to FW to partially initialize the device. This
6860 * performs initialization that generally doesn't depend on user input.
6862 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6864 struct fw_initialize_cmd c;
/* Send a write-only INITIALIZE command; no reply payload expected. */
6866 memset(&c, 0, sizeof(c));
6867 INIT_CMD(c, INITIALIZE, WRITE);
6868 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6872 * t4_query_params_rw - query FW or device parameters
6873 * @adap: the adapter
6874 * @mbox: mailbox to use for the FW command
6877 * @nparams: the number of parameters
6878 * @params: the parameter names
6879 * @val: the parameter values
6880 * @rw: Write and read flag
6882 * Reads the value of FW or device parameters. Up to 7 parameters can be
6885 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6886 unsigned int vf, unsigned int nparams, const u32 *params,
6890 struct fw_params_cmd c;
/* 'p' walks the (mnem, val) pairs in the command's parameter array. */
6891 __be32 *p = &c.param[0].mnem;
6896 memset(&c, 0, sizeof(c));
6897 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6898 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6899 V_FW_PARAMS_CMD_PFN(pf) |
6900 V_FW_PARAMS_CMD_VFN(vf));
6901 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Load each requested parameter mnemonic (and, when @rw is set on
 * elided lines, a value to write as well). */
6903 for (i = 0; i < nparams; i++) {
6904 *p++ = cpu_to_be32(*params++);
6906 *p = cpu_to_be32(*(val + i));
/* On success, copy the returned values back to the caller; the reply
 * stores values at every other 32-bit word (hence p += 2). */
6910 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6912 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6913 *val++ = be32_to_cpu(*p);
/* Read-only wrapper around t4_query_params_rw() (rw = 0). */
6917 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6918 unsigned int vf, unsigned int nparams, const u32 *params,
6921 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6925 * t4_set_params_timeout - sets FW or device parameters
6926 * @adap: the adapter
6927 * @mbox: mailbox to use for the FW command
6930 * @nparams: the number of parameters
6931 * @params: the parameter names
6932 * @val: the parameter values
6933 * @timeout: the timeout time
6935 * Sets the value of FW or device parameters. Up to 7 parameters can be
6936 * specified at once.
6938 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6939 unsigned int pf, unsigned int vf,
6940 unsigned int nparams, const u32 *params,
6941 const u32 *val, int timeout)
6943 struct fw_params_cmd c;
/* 'p' walks the (mnem, val) pairs in the command's parameter array. */
6944 __be32 *p = &c.param[0].mnem;
6949 memset(&c, 0, sizeof(c));
6950 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6951 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6952 V_FW_PARAMS_CMD_PFN(pf) |
6953 V_FW_PARAMS_CMD_VFN(vf));
6954 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Interleave each parameter mnemonic with its new value. */
6957 *p++ = cpu_to_be32(*params++);
6958 *p++ = cpu_to_be32(*val++);
/* Use the timed mailbox variant so the caller controls the deadline. */
6961 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6965 * t4_set_params - sets FW or device parameters
6966 * @adap: the adapter
6967 * @mbox: mailbox to use for the FW command
6970 * @nparams: the number of parameters
6971 * @params: the parameter names
6972 * @val: the parameter values
6974 * Sets the value of FW or device parameters. Up to 7 parameters can be
6975 * specified at once.
/* Wrapper around t4_set_params_timeout() using the default FW timeout. */
6977 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6978 unsigned int vf, unsigned int nparams, const u32 *params,
6981 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6982 FW_CMD_MAX_TIMEOUT);
6986 * t4_cfg_pfvf - configure PF/VF resource limits
6987 * @adap: the adapter
6988 * @mbox: mailbox to use for the FW command
6989 * @pf: the PF being configured
6990 * @vf: the VF being configured
6991 * @txq: the max number of egress queues
6992 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6993 * @rxqi: the max number of interrupt-capable ingress queues
6994 * @rxq: the max number of interruptless ingress queues
6995 * @tc: the PCI traffic class
6996 * @vi: the max number of virtual interfaces
6997 * @cmask: the channel access rights mask for the PF/VF
6998 * @pmask: the port access rights mask for the PF/VF
6999 * @nexact: the maximum number of exact MPS filters
7000 * @rcaps: read capabilities
7001 * @wxcaps: write/execute capabilities
7003 * Configures resource limits and capabilities for a physical or virtual
7006 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7007 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7008 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7009 unsigned int vi, unsigned int cmask, unsigned int pmask,
7010 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7012 struct fw_pfvf_cmd c;
/* Pack all resource limits and capability masks into one FW_PFVF
 * write command addressed to the given PF/VF. */
7014 memset(&c, 0, sizeof(c));
7015 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7016 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7017 V_FW_PFVF_CMD_VFN(vf));
7018 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7019 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7020 V_FW_PFVF_CMD_NIQ(rxq));
7021 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7022 V_FW_PFVF_CMD_PMASK(pmask) |
7023 V_FW_PFVF_CMD_NEQ(txq));
7024 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7025 V_FW_PFVF_CMD_NVI(vi) |
7026 V_FW_PFVF_CMD_NEXACTF(nexact));
7027 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7028 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7029 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7030 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7034 * t4_alloc_vi_func - allocate a virtual interface
7035 * @adap: the adapter
7036 * @mbox: mailbox to use for the FW command
7037 * @port: physical port associated with the VI
7038 * @pf: the PF owning the VI
7039 * @vf: the VF owning the VI
7040 * @nmac: number of MAC addresses needed (1 to 5)
7041 * @mac: the MAC addresses of the VI
7042 * @rss_size: size of RSS table slice associated with this VI
7043 * @portfunc: which Port Application Function MAC Address is desired
7044 * @idstype: Intrusion Detection Type
7046 * Allocates a virtual interface for the given physical port. If @mac is
7047 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7048 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7049 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7050 * stored consecutively so the space needed is @nmac * 6 bytes.
7051 * Returns a negative error number or the non-negative VI id.
7053 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7054 unsigned int port, unsigned int pf, unsigned int vf,
7055 unsigned int nmac, u8 *mac, u16 *rss_size,
7056 unsigned int portfunc, unsigned int idstype)
/* Issue an FW_VI EXEC/ALLOC command to create a virtual interface on
 * @port for @pf/@vf, then unpack the assigned MACs, RSS slice size,
 * and VI id from the reply. */
7061 memset(&c, 0, sizeof(c));
7062 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7063 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7064 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7065 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7066 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7067 V_FW_VI_CMD_FUNC(portfunc));
7068 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
/* When the caller doesn't want an RSS slice, set NORSS. */
7071 c.norss_rsssize = F_FW_VI_CMD_NORSS;
7073 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out up to @nmac assigned MAC addresses, packed consecutively
 * at 6-byte offsets (fall-through copies for nmac > 1, guards elided
 * in this view). */
7078 memcpy(mac, c.mac, sizeof(c.mac));
7081 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7083 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7085 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7087 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Return the RSS slice size (if requested) and the non-negative VI id. */
7091 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7092 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7096 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7097 * @adap: the adapter
7098 * @mbox: mailbox to use for the FW command
7099 * @port: physical port associated with the VI
7100 * @pf: the PF owning the VI
7101 * @vf: the VF owning the VI
7102 * @nmac: number of MAC addresses needed (1 to 5)
7103 * @mac: the MAC addresses of the VI
7104 * @rss_size: size of RSS table slice associated with this VI
7106 * backwards-compatible convenience routine to allocate a Virtual
7107 * Interface with an Ethernet Port Application Function and Intrusion
7108 * Detection System disabled.
/* Convenience wrapper: allocate an Ethernet-function VI via
 * t4_alloc_vi_func() (port-function/IDS arguments elided in this view). */
7110 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7111 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7114 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7119 * t4_free_vi - free a virtual interface
7120 * @adap: the adapter
7121 * @mbox: mailbox to use for the FW command
7122 * @pf: the PF owning the VI
7123 * @vf: the VF owning the VI
7124 * @viid: virtual interface identifier
7126 * Free a previously allocated virtual interface.
7128 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7129 unsigned int vf, unsigned int viid)
/* Issue an FW_VI command with the FREE flag to release VI @viid. */
7133 memset(&c, 0, sizeof(c));
7134 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7137 V_FW_VI_CMD_PFN(pf) |
7138 V_FW_VI_CMD_VFN(vf));
7139 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7140 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7142 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7146 * t4_set_rxmode - set Rx properties of a virtual interface
7147 * @adap: the adapter
7148 * @mbox: mailbox to use for the FW command
7150 * @mtu: the new MTU or -1
7151 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7152 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7153 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7154 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7155 * @sleep_ok: if true we may sleep while awaiting command completion
7157 * Sets Rx properties of a virtual interface.
7159 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7160 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7163 struct fw_vi_rxmode_cmd c;
7165 /* convert to FW values */
/* A caller value of -1 means "no change"; the FW encodes that as the
 * field's all-ones mask (M_FW_VI_RXMODE_CMD_*). */
7167 mtu = M_FW_VI_RXMODE_CMD_MTU;
7169 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7171 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7173 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7175 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7177 memset(&c, 0, sizeof(c));
7178 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7179 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7180 V_FW_VI_RXMODE_CMD_VIID(viid));
7181 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7183 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7184 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7185 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7186 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7187 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* t4_wr_mbox_meat() honours @sleep_ok when waiting for completion. */
7188 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7192 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7193 * @adap: the adapter
7194 * @mbox: mailbox to use for the FW command
7196 * @free: if true any existing filters for this VI id are first removed
7197 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7198 * @addr: the MAC address(es)
7199 * @idx: where to store the index of each allocated filter
7200 * @hash: pointer to hash address filter bitmap
7201 * @sleep_ok: call is allowed to sleep
7203 * Allocates an exact-match filter for each of the supplied addresses and
7204 * sets it to the corresponding address. If @idx is not %NULL it should
7205 * have at least @naddr entries, each of which will be set to the index of
7206 * the filter allocated for the corresponding MAC address. If a filter
7207 * could not be allocated for an address its index is set to 0xffff.
7208 * If @hash is not %NULL addresses that fail to allocate an exact filter
7209 * are hashed and update the hash filter bitmap pointed at by @hash.
7211 * Returns a negative error number or the number of filters allocated.
7213 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7214 unsigned int viid, bool free, unsigned int naddr,
7215 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7217 int offset, ret = 0;
7218 struct fw_vi_mac_cmd c;
7219 unsigned int nfilters = 0;
/* Hardware limit on exact-match MPS TCAM entries for this chip. */
7220 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7221 unsigned int rem = naddr;
7223 if (naddr > max_naddr)
/* Addresses are submitted in batches of up to ARRAY_SIZE(c.u.exact)
 * per FW_VI_MAC command; the command length covers only the entries
 * actually used. */
7226 for (offset = 0; offset < naddr ; /**/) {
7227 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7229 : ARRAY_SIZE(c.u.exact));
7230 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7231 u.exact[fw_naddr]), 16);
7232 struct fw_vi_mac_exact *p;
7235 memset(&c, 0, sizeof(c));
7236 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
/* EXEC + FREEMACS when @free: drop the VI's existing filters first. */
7239 V_FW_CMD_EXEC(free) |
7240 V_FW_VI_MAC_CMD_VIID(viid));
7241 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7242 V_FW_CMD_LEN16(len16));
/* ADD_MAC asks firmware to pick a free exact-match slot per address. */
7244 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7246 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7247 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7248 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7252 * It's okay if we run out of space in our MAC address arena.
7253 * Some of the addresses we submit may get stored so we need
7254 * to run through the reply to see what the results were ...
7256 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7257 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: a returned index >= max_naddr means the address
 * didn't get an exact filter; fall back to the hash filter bitmap. */
7260 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7261 u16 index = G_FW_VI_MAC_CMD_IDX(
7262 be16_to_cpu(p->valid_to_idx));
7265 idx[offset+i] = (index >= max_naddr
7268 if (index < max_naddr)
7271 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* Partial success (-FW_ENOMEM) still reports the filters that stuck. */
7279 if (ret == 0 || ret == -FW_ENOMEM)
7285 * t4_change_mac - modifies the exact-match filter for a MAC address
7286 * @adap: the adapter
7287 * @mbox: mailbox to use for the FW command
7289 * @idx: index of existing filter for old value of MAC address, or -1
7290 * @addr: the new MAC address value
7291 * @persist: whether a new MAC allocation should be persistent
7292 * @add_smt: if true also add the address to the HW SMT
7294 * Modifies an exact-match filter and sets it to the new MAC address if
7295 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7296 * latter case the address is added persistently if @persist is %true.
7298 * Note that in general it is not possible to modify the value of a given
7299 * filter so the generic way to modify an address filter is to free the one
7300 * being used by the old address value and allocate a new filter for the
7301 * new address value.
7303 * Returns a negative error number or the index of the filter with the new
7304 * MAC value. Note that this index may differ from @idx.
7306 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7307 int idx, const u8 *addr, bool persist, bool add_smt)
/*
 * Program a single exact-match MAC filter via FW_VI_MAC_CMD; a negative
 * @idx asks the firmware to pick a (possibly persistent) free slot.
 */
7310 struct fw_vi_mac_cmd c;
7311 struct fw_vi_mac_exact *p = c.u.exact;
7312 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7314 if (idx < 0) /* new allocation */
7315 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
/* Optionally mirror the address into the source MAC table (SMT). */
7316 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7318 memset(&c, 0, sizeof(c));
7319 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7320 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7321 V_FW_VI_MAC_CMD_VIID(viid));
7322 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7323 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7324 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7325 V_FW_VI_MAC_CMD_IDX(idx));
7326 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* Reply is written back into c; extract the index firmware chose. */
7328 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7330 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
/* An out-of-range index means no exact filter was allocated. */
7331 if (ret >= max_mac_addr)
7338 * t4_set_addr_hash - program the MAC inexact-match hash filter
7339 * @adap: the adapter
7340 * @mbox: mailbox to use for the FW command
7342 * @ucast: whether the hash filter should also match unicast addresses
7343 * @vec: the value to be written to the hash filter
7344 * @sleep_ok: call is allowed to sleep
7346 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7348 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7349 bool ucast, u64 vec, bool sleep_ok)
/*
 * Write the VI's 64-bit inexact-match hash filter vector with a single
 * FW_VI_MAC_CMD of entry type HASHVEC.
 */
7351 struct fw_vi_mac_cmd c;
7354 memset(&c, 0, sizeof(c));
7355 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7356 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7357 V_FW_VI_ENABLE_CMD_VIID(viid));
/* HASHUNIEN also matches unicast addresses against the hash when set. */
7358 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7359 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7360 c.freemacs_to_len16 = cpu_to_be32(val);
7361 c.u.hash.hashvec = cpu_to_be64(vec);
7362 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7366 * t4_enable_vi_params - enable/disable a virtual interface
7367 * @adap: the adapter
7368 * @mbox: mailbox to use for the FW command
7370 * @rx_en: 1=enable Rx, 0=disable Rx
7371 * @tx_en: 1=enable Tx, 0=disable Tx
7372 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7374 * Enables/disables a virtual interface. Note that setting DCB Enable
7375 * only makes sense when enabling a Virtual Interface ...
7377 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7378 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
/*
 * Enable/disable Rx, Tx and DCB message delivery for a VI via a single
 * FW_VI_ENABLE_CMD (issued non-sleeping through t4_wr_mbox_ns).
 */
7380 struct fw_vi_enable_cmd c;
7382 memset(&c, 0, sizeof(c));
7383 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7384 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7385 V_FW_VI_ENABLE_CMD_VIID(viid));
7386 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7387 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7388 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7390 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7394 * t4_enable_vi - enable/disable a virtual interface
7395 * @adap: the adapter
7396 * @mbox: mailbox to use for the FW command
7398 * @rx_en: 1=enable Rx, 0=disable Rx
7399 * @tx_en: 1=enable Tx, 0=disable Tx
7401 * Enables/disables a virtual interface. Note that setting DCB Enable
7402 * only makes sense when enabling a Virtual Interface ...
7404 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7405 bool rx_en, bool tx_en)
/* Thin wrapper: enable/disable a VI with DCB message delivery off. */
7407 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7411 * t4_identify_port - identify a VI's port by blinking its LED
7412 * @adap: the adapter
7413 * @mbox: mailbox to use for the FW command
7415 * @nblinks: how many times to blink LED at 2.5 Hz
7417 * Identifies a VI's port by blinking its LED.
7419 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7420 unsigned int nblinks)
/*
 * Blink the port LED @nblinks times (FW_VI_ENABLE_CMD with the LED flag
 * set and the blink duration in c.blinkdur).
 */
7422 struct fw_vi_enable_cmd c;
7424 memset(&c, 0, sizeof(c));
7425 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7426 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7427 V_FW_VI_ENABLE_CMD_VIID(viid));
7428 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7429 c.blinkdur = cpu_to_be16(nblinks);
7430 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7434 * t4_iq_stop - stop an ingress queue and its FLs
7435 * @adap: the adapter
7436 * @mbox: mailbox to use for the FW command
7437 * @pf: the PF owning the queues
7438 * @vf: the VF owning the queues
7439 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7440 * @iqid: ingress queue id
7441 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7442 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7444 * Stops an ingress queue and its associated FLs, if any. This causes
7445 * any current or future data/messages destined for these queues to be
7448 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7449 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7450 unsigned int fl0id, unsigned int fl1id)
/*
 * Stop an ingress queue and its free lists with FW_IQ_CMD + IQSTOP;
 * 0xffff FL ids mean "no FL attached" per the function's contract.
 */
7454 memset(&c, 0, sizeof(c));
7455 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7456 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7457 V_FW_IQ_CMD_VFN(vf));
7458 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7459 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7460 c.iqid = cpu_to_be16(iqid);
7461 c.fl0id = cpu_to_be16(fl0id);
7462 c.fl1id = cpu_to_be16(fl1id);
7463 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7467 * t4_iq_free - free an ingress queue and its FLs
7468 * @adap: the adapter
7469 * @mbox: mailbox to use for the FW command
7470 * @pf: the PF owning the queues
7471 * @vf: the VF owning the queues
7472 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7473 * @iqid: ingress queue id
7474 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7475 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7477 * Frees an ingress queue and its associated FLs, if any.
7479 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7480 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7481 unsigned int fl0id, unsigned int fl1id)
/*
 * Free an ingress queue and its free lists. Identical shape to
 * t4_iq_stop() except the FREE flag is set instead of IQSTOP.
 */
7485 memset(&c, 0, sizeof(c));
7486 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7487 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7488 V_FW_IQ_CMD_VFN(vf));
7489 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7490 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7491 c.iqid = cpu_to_be16(iqid);
7492 c.fl0id = cpu_to_be16(fl0id);
7493 c.fl1id = cpu_to_be16(fl1id);
7494 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7498 * t4_eth_eq_free - free an Ethernet egress queue
7499 * @adap: the adapter
7500 * @mbox: mailbox to use for the FW command
7501 * @pf: the PF owning the queue
7502 * @vf: the VF owning the queue
7503 * @eqid: egress queue id
7505 * Frees an Ethernet egress queue.
7507 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7508 unsigned int vf, unsigned int eqid)
/* Free an Ethernet egress queue via FW_EQ_ETH_CMD with the FREE flag. */
7510 struct fw_eq_eth_cmd c;
7512 memset(&c, 0, sizeof(c));
7513 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7514 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7515 V_FW_EQ_ETH_CMD_PFN(pf) |
7516 V_FW_EQ_ETH_CMD_VFN(vf));
7517 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7518 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7519 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7523 * t4_ctrl_eq_free - free a control egress queue
7524 * @adap: the adapter
7525 * @mbox: mailbox to use for the FW command
7526 * @pf: the PF owning the queue
7527 * @vf: the VF owning the queue
7528 * @eqid: egress queue id
7530 * Frees a control egress queue.
7532 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7533 unsigned int vf, unsigned int eqid)
/* Free a control egress queue via FW_EQ_CTRL_CMD with the FREE flag. */
7535 struct fw_eq_ctrl_cmd c;
7537 memset(&c, 0, sizeof(c));
7538 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7539 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7540 V_FW_EQ_CTRL_CMD_PFN(pf) |
7541 V_FW_EQ_CTRL_CMD_VFN(vf));
7542 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7543 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7544 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7548 * t4_ofld_eq_free - free an offload egress queue
7549 * @adap: the adapter
7550 * @mbox: mailbox to use for the FW command
7551 * @pf: the PF owning the queue
7552 * @vf: the VF owning the queue
7553 * @eqid: egress queue id
7555 * Frees a control egress queue.
7557 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7558 unsigned int vf, unsigned int eqid)
/* Free an offload egress queue via FW_EQ_OFLD_CMD with the FREE flag. */
7560 struct fw_eq_ofld_cmd c;
7562 memset(&c, 0, sizeof(c));
7563 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7564 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7565 V_FW_EQ_OFLD_CMD_PFN(pf) |
7566 V_FW_EQ_OFLD_CMD_VFN(vf));
7567 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7568 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7569 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7573 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7574 * @link_down_rc: Link Down Reason Code
7576 * Returns a string representation of the Link Down Reason Code.
7578 const char *t4_link_down_rc_str(unsigned char link_down_rc)
/*
 * Map a firmware Link Down Reason Code to a human-readable string;
 * out-of-range codes fall back to "Bad Reason Code".
 * NOTE(review): table entries must stay in firmware LDRC order — the
 * code is used directly as an index.
 */
7580 static const char *reason[] = {
7583 "Auto-negotiation Failure",
7585 "Insufficient Airflow",
7586 "Unable To Determine Reason",
7587 "No RX Signal Detected",
7591 if (link_down_rc >= ARRAY_SIZE(reason))
7592 return "Bad Reason Code";
7594 return reason[link_down_rc];
7598 * Updates all fields owned by the common code in port_info and link_config
7599 * based on information provided by the firmware. Does not touch any
7600 * requested_* field.
7602 static void handle_port_info(struct port_info *pi, const struct fw_port_info *p)
/*
 * Decode a firmware fw_port_info reply into the common-code fields of
 * port_info/link_config: port/module type, MDIO address, capabilities,
 * link state, speed, pause (fc) and FEC. requested_* fields untouched.
 */
7604 struct link_config *lc = &pi->link_cfg;
7606 unsigned char fc, fec;
7607 u32 stat = be32_to_cpu(p->lstatus_to_modtype);
7609 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
7610 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
/* MDIO address is only valid when the MDIOCAP bit is set. */
7611 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
7612 G_FW_PORT_CMD_MDIOADDR(stat) : -1;
7614 lc->supported = be16_to_cpu(p->pcap);
7615 lc->advertising = be16_to_cpu(p->acap);
7616 lc->lp_advertising = be16_to_cpu(p->lpacap);
7617 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7618 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
/* Decode the single advertised link speed bit into lc->speed. */
7621 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7623 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7625 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7627 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7629 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7631 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
/* Accumulate pause (flow-control) flags from the status word. */
7636 if (stat & F_FW_PORT_CMD_RXPAUSE)
7638 if (stat & F_FW_PORT_CMD_TXPAUSE)
/* Derive FEC mode from the advertised capabilities (RS preferred). */
7643 if (lc->advertising & FW_PORT_CAP_FEC_RS)
7645 else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
7651 * t4_update_port_info - retrieve and update port information if changed
7652 * @pi: the port_info
7654 * We issue a Get Port Information Command to the Firmware and, if
7655 * successful, we check to see if anything is different from what we
7656 * last recorded and update things accordingly.
7658 int t4_update_port_info(struct port_info *pi)
/*
 * Issue a GET_PORT_INFO FW_PORT_CMD for this port's Tx channel and feed
 * the reply through handle_port_info(). Uses the non-sleeping mailbox
 * variant (t4_wr_mbox_ns).
 */
7660 struct fw_port_cmd port_cmd;
7663 memset(&port_cmd, 0, sizeof port_cmd);
7664 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
7665 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7666 V_FW_PORT_CMD_PORTID(pi->tx_chan));
7667 port_cmd.action_to_len16 = cpu_to_be32(
7668 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7669 FW_LEN16(port_cmd));
7670 ret = t4_wr_mbox_ns(pi->adapter, pi->adapter->mbox,
7671 &port_cmd, sizeof(port_cmd), &port_cmd);
7675 handle_port_info(pi, &port_cmd.u.info);
7680 * t4_handle_fw_rpl - process a FW reply message
7681 * @adap: the adapter
7682 * @rpl: start of the FW message
7684 * Processes a FW message, such as link state change messages.
7686 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/*
 * Dispatch an asynchronous firmware reply. Only port-info change
 * notifications (FW_PORT_CMD / GET_PORT_INFO) are handled here; anything
 * else is logged as unknown.
 */
7688 u8 opcode = *(const u8 *)rpl;
7689 const struct fw_port_cmd *p = (const void *)rpl;
7690 unsigned int action =
7691 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7693 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7694 /* link/module state change message */
7695 int i, old_ptype, old_mtype;
7696 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7697 struct port_info *pi = NULL;
7698 struct link_config *lc, *old_lc;
/* Locate the port_info whose Tx channel matches the message. */
7700 for_each_port(adap, i) {
7701 pi = adap2pinfo(adap, i);
7702 if (pi->tx_chan == chan)
/* Snapshot old state so we can detect what changed. */
7708 old_lc = &pi->old_link_cfg;
7709 old_ptype = pi->port_type;
7710 old_mtype = pi->mod_type;
7711 handle_port_info(pi, &p->u.info);
7713 if (old_ptype != pi->port_type || old_mtype != pi->mod_type) {
7714 t4_os_portmod_changed(pi);
/* Notify the OS layer only when an externally visible link attribute changed. */
7717 if (old_lc->link_ok != lc->link_ok ||
7718 old_lc->speed != lc->speed ||
7719 old_lc->fec != lc->fec ||
7720 old_lc->fc != lc->fc) {
7721 t4_os_link_changed(pi);
7726 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7733 * get_pci_mode - determine a card's PCI mode
7734 * @adapter: the adapter
7735 * @p: where to store the PCI settings
7737 * Determines a card's PCI mode and associated parameters, such as speed
7740 static void get_pci_mode(struct adapter *adapter,
7741 struct pci_params *p)
/*
 * Read negotiated PCIe link speed and width from the Link Status
 * register of the PCIe capability, if present.
 */
7746 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7748 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7749 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* Negotiated link width field starts at bit 4 of LNKSTA. */
7750 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7755 u32 vendor_and_model_id;
7759 int t4_get_flash_params(struct adapter *adapter)
/*
 * Identify the serial flash part with a JEDEC Read ID command and record
 * its size (params.sf_size) and 64KB-sector count (params.sf_nsec).
 * Unknown parts are assumed to be at least 4MB, with a warning.
 */
7762 * Table for non-standard supported Flash parts. Note, all Flash
7763 * parts must have 64KB sectors.
7765 static struct flash_desc supported_flash[] = {
7766 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7771 unsigned int part, manufacturer;
7772 unsigned int density, size = 0;
7776 * Issue a Read ID Command to the Flash part. We decode supported
7777 * Flash parts and their sizes from this. There's a newer Query
7778 * Command which can retrieve detailed geometry information but many
7779 * Flash parts don't support it.
7781 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7783 ret = sf1_read(adapter, 3, 0, 1, &flashid);
7784 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7789 * Check to see if it's one of our non-standard supported Flash parts.
7791 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
7792 if (supported_flash[part].vendor_and_model_id == flashid) {
7793 adapter->params.sf_size =
7794 supported_flash[part].size_mb;
7795 adapter->params.sf_nsec =
7796 adapter->params.sf_size / SF_SEC_SIZE;
7801 * Decode Flash part size. The code below looks repetative with
7802 * common encodings, but that's not guaranteed in the JEDEC
7803 * specification for the Read JADEC ID command. The only thing that
7804 * we're guaranteed by the JADEC specification is where the
7805 * Manufacturer ID is in the returned result. After that each
7806 * Manufacturer ~could~ encode things completely differently.
7807 * Note, all Flash parts must have 64KB sectors.
7809 manufacturer = flashid & 0xff;
7810 switch (manufacturer) {
7811 case 0x20: /* Micron/Numonix */
7813 * This Density -> Size decoding table is taken from Micron
7816 density = (flashid >> 16) & 0xff;
7818 case 0x14: size = 1 << 20; break; /* 1MB */
7819 case 0x15: size = 1 << 21; break; /* 2MB */
7820 case 0x16: size = 1 << 22; break; /* 4MB */
7821 case 0x17: size = 1 << 23; break; /* 8MB */
7822 case 0x18: size = 1 << 24; break; /* 16MB */
7823 case 0x19: size = 1 << 25; break; /* 32MB */
7824 case 0x20: size = 1 << 26; break; /* 64MB */
7825 case 0x21: size = 1 << 27; break; /* 128MB */
7826 case 0x22: size = 1 << 28; break; /* 256MB */
7830 case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
7832 * This Density -> Size decoding table is taken from ISSI
7835 density = (flashid >> 16) & 0xff;
7837 case 0x16: size = 1 << 25; break; /* 32MB */
7838 case 0x17: size = 1 << 26; break; /* 64MB */
7842 case 0xc2: /* Macronix */
7844 * This Density -> Size decoding table is taken from Macronix
7847 density = (flashid >> 16) & 0xff;
7849 case 0x17: size = 1 << 23; break; /* 8MB */
7850 case 0x18: size = 1 << 24; break; /* 16MB */
7854 case 0xef: /* Winbond */
7856 * This Density -> Size decoding table is taken from Winbond
7859 density = (flashid >> 16) & 0xff;
7861 case 0x17: size = 1 << 23; break; /* 8MB */
7862 case 0x18: size = 1 << 24; break; /* 16MB */
7867 /* If we didn't recognize the FLASH part, that's no real issue: the
7868 * Hardware/Software contract says that Hardware will _*ALWAYS*_
7869 * use a FLASH part which is at least 4MB in size and has 64KB
7870 * sectors. The unrecognized FLASH part is likely to be much larger
7871 * than 4MB, but that's all we really need.
7874 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
7879 * Store decoded Flash size and fall through into vetting code.
7881 adapter->params.sf_size = size;
7882 adapter->params.sf_nsec = size / SF_SEC_SIZE;
7886 * We should ~probably~ reject adapters with FLASHes which are too
7887 * small but we have some legacy FPGAs with small FLASHes that we'd
7888 * still like to use. So instead we emit a scary message ...
7890 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7891 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
7892 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
7897 static void set_pcie_completion_timeout(struct adapter *adapter,
/*
 * Read-modify-write the PCIe Device Control 2 register to set the
 * completion timeout range encoding (value supplied by the caller).
 */
7903 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7905 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7908 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7912 const struct chip_params *t4_get_chip_params(int chipid)
/*
 * Return the static per-generation parameter table entry for the given
 * chip id (T4/T5/T6), or NULL for an unknown/out-of-range id.
 */
7914 static const struct chip_params chip_params[] = {
7918 .pm_stats_cnt = PM_NSTATS,
7919 .cng_ch_bits_log = 2,
7921 .cim_num_obq = CIM_NUM_OBQ,
7922 .mps_rplc_size = 128,
7924 .sge_fl_db = F_DBPRIO,
7925 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7930 .pm_stats_cnt = PM_NSTATS,
7931 .cng_ch_bits_log = 2,
7933 .cim_num_obq = CIM_NUM_OBQ_T5,
7934 .mps_rplc_size = 128,
7936 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7937 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7942 .pm_stats_cnt = T6_PM_NSTATS,
7943 .cng_ch_bits_log = 3,
7945 .cim_num_obq = CIM_NUM_OBQ_T5,
7946 .mps_rplc_size = 256,
7949 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* Table is indexed from CHELSIO_T4; rebase and bounds-check. */
7953 chipid -= CHELSIO_T4;
7954 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7957 return &chip_params[chipid];
7961 * t4_prep_adapter - prepare SW and HW for operation
7962 * @adapter: the adapter
7963 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7965 * Initialize adapter SW state for the various HW modules, set initial
7966 * values for some adapter tunables, take PHYs out of reset, and
7967 * initialize the MDIO interface.
7969 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
/*
 * One-time SW/HW prep: detect chip generation/revision, select the
 * chip_params table, read flash and VPD parameters, and seed debug-safe
 * defaults in case the firmware is unreachable.
 */
7975 get_pci_mode(adapter, &adapter->params.pci);
7977 pl_rev = t4_read_reg(adapter, A_PL_REV);
7978 adapter->params.chipid = G_CHIPID(pl_rev);
7979 adapter->params.rev = G_REV(pl_rev);
7980 if (adapter->params.chipid == 0) {
7981 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7982 adapter->params.chipid = CHELSIO_T4;
7984 /* T4A1 chip is not supported */
7985 if (adapter->params.rev == 1) {
7986 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7991 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
7992 if (adapter->chip_params == NULL)
7995 adapter->params.pci.vpd_cap_addr =
7996 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7998 ret = t4_get_flash_params(adapter);
8002 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
8006 /* Cards with real ASICs have the chipid in the PCIe device id */
8007 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
8008 if (device_id >> 12 == chip_id(adapter))
8009 adapter->params.cim_la_size = CIMLA_SIZE;
/* Otherwise this is an FPGA, which has a larger CIM logic-analyzer. */
8012 adapter->params.fpga = 1;
8013 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
8016 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8019 * Default port and clock for debugging in case we can't reach FW.
8021 adapter->params.nports = 1;
8022 adapter->params.portvec = 1;
8023 adapter->params.vpd.cclk = 50000;
8025 /* Set pci completion timeout value to 4 seconds. */
8026 set_pcie_completion_timeout(adapter, 0xd);
8031 * t4_shutdown_adapter - shut down adapter, host & wire
8032 * @adapter: the adapter
8034 * Perform an emergency shutdown of the adapter and stop it from
8035 * continuing any further communication on the ports or DMA to the
8036 * host. This is typically used when the adapter and/or firmware
8037 * have crashed and we want to prevent any further accidental
8038 * communication with the rest of the world. This will also force
8039 * the port Link Status to go down -- if register writes work --
8040 * which should help our peers figure out that we're down.
8042 int t4_shutdown_adapter(struct adapter *adapter)
/*
 * Emergency stop: disable interrupts, kill GPIOs, force signal-detect
 * off on every port (dropping link), then disable the SGE globally so
 * no further DMA to the host can occur.
 */
8046 t4_intr_disable(adapter);
8047 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
8048 for_each_port(adapter, port) {
/* Port-config register block moved between T4 and T5+. */
8049 u32 a_port_cfg = is_t4(adapter) ?
8050 PORT_REG(port, A_XGMAC_PORT_CFG) :
8051 T5_PORT_REG(port, A_MAC_PORT_CFG);
8053 t4_write_reg(adapter, a_port_cfg,
8054 t4_read_reg(adapter, a_port_cfg)
8055 & ~V_SIGNAL_DET(1));
8057 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
8063 * t4_init_devlog_params - initialize adapter->params.devlog
8064 * @adap: the adapter
8065 * @fw_attach: whether we can talk to the firmware
8067 * Initialize various fields of the adapter's Firmware Device Log
8068 * Parameters structure.
8070 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
/*
 * Discover the firmware device-log location/size, preferring the
 * PCIE_FW_PF shadow register (works even without firmware contact) and
 * falling back to a FW_DEVLOG_CMD mailbox query when @fw_attach allows.
 */
8072 struct devlog_params *dparams = &adap->params.devlog;
8074 unsigned int devlog_meminfo;
8075 struct fw_devlog_cmd devlog_cmd;
8078 /* If we're dealing with newer firmware, the Device Log Paramerters
8079 * are stored in a designated register which allows us to access the
8080 * Device Log even if we can't talk to the firmware.
8083 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
8085 unsigned int nentries, nentries128;
8087 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
/* Address is stored in 16-byte units; convert to bytes. */
8088 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
/* Entry count is encoded in units of 128 entries, biased by one. */
8090 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
8091 nentries = (nentries128 + 1) * 128;
8092 dparams->size = nentries * sizeof(struct fw_devlog_e);
8098 * For any failing returns ...
8100 memset(dparams, 0, sizeof *dparams);
8103 * If we can't talk to the firmware, there's really nothing we can do
8109 /* Otherwise, ask the firmware for it's Device Log Parameters.
8111 memset(&devlog_cmd, 0, sizeof devlog_cmd);
8112 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
8113 F_FW_CMD_REQUEST | F_FW_CMD_READ);
8114 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
8115 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8121 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
8122 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
8123 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
8124 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8130 * t4_init_sge_params - initialize adap->params.sge
8131 * @adapter: the adapter
8133 * Initialize various fields of the adapter's SGE Parameters structure.
8135 int t4_init_sge_params(struct adapter *adapter)
/*
 * Cache SGE configuration into adapter->params.sge: interrupt coalescing
 * thresholds and timers, FL starvation thresholds, doorbells-per-page,
 * host page size, pad/pack boundaries, and FL buffer sizes.
 */
8138 struct sge_params *sp = &adapter->params.sge;
8139 unsigned i, tscale = 1;
8141 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
8142 sp->counter_val[0] = G_THRESHOLD_0(r);
8143 sp->counter_val[1] = G_THRESHOLD_1(r);
8144 sp->counter_val[2] = G_THRESHOLD_2(r);
8145 sp->counter_val[3] = G_THRESHOLD_3(r);
/* T6+ applies a global timer scale (TSCALE) to all SGE timers. */
8147 if (chip_id(adapter) >= CHELSIO_T6) {
8148 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
8149 tscale = G_TSCALE(r);
8156 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
8157 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
8158 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
8159 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
8160 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
8161 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
8162 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
8163 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
8164 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
8166 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
8167 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
/* The packing-FL starvation threshold field differs per generation. */
8169 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
8170 else if (is_t5(adapter))
8171 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
8173 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
8175 /* egress queues: log2 of # of doorbells per BAR2 page */
8176 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
8177 r >>= S_QUEUESPERPAGEPF0 +
8178 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
8179 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
8181 /* ingress queues: log2 of # of doorbells per BAR2 page */
8182 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
8183 r >>= S_QUEUESPERPAGEPF0 +
8184 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
8185 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
8187 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
8188 r >>= S_HOSTPAGESIZEPF0 +
8189 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
/* Field encodes log2(page size) - 10, so +10 yields the shift. */
8190 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
8192 r = t4_read_reg(adapter, A_SGE_CONTROL);
8193 sp->sge_control = r;
8194 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
8195 sp->fl_pktshift = G_PKTSHIFT(r);
8196 if (chip_id(adapter) <= CHELSIO_T5) {
8197 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
8198 X_INGPADBOUNDARY_SHIFT);
8200 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
8201 X_T6_INGPADBOUNDARY_SHIFT);
8204 sp->pack_boundary = sp->pad_boundary;
8206 r = t4_read_reg(adapter, A_SGE_CONTROL2);
/* An encoded pack boundary of 0 means the 16-byte minimum. */
8207 if (G_INGPACKBOUNDARY(r) == 0)
8208 sp->pack_boundary = 16;
8210 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
8212 for (i = 0; i < SGE_FLBUF_SIZES; i++)
8213 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
8214 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
8220 * Read and cache the adapter's compressed filter mode and ingress config.
8222 static void read_filter_mode_and_ingress_config(struct adapter *adap,
/*
 * Cache TP_VLAN_PRI_MAP (filter mode) and TP_INGRESS_CONFIG, then
 * precompute the Compressed Filter Tuple shift of every filter field.
 */
8225 struct tp_params *tpp = &adap->params.tp;
8227 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
8229 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
8233 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8234 * shift positions of several elements of the Compressed Filter Tuple
8235 * for this adapter which we need frequently ...
8237 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
8238 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
8239 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
8240 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
8241 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
8242 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
8243 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
8244 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
8245 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
8246 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
8249 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8250 * represents the presense of an Outer VLAN instead of a VNIC ID.
8252 if ((tpp->ingress_config & F_VNIC) == 0)
8253 tpp->vnic_shift = -1;
8257 * t4_init_tp_params - initialize adap->params.tp
8258 * @adap: the adapter
8260 * Initialize various fields of the adapter's TP Parameters structure.
8262 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
/*
 * Cache TP timer resolutions, default Tx modulation queue mapping, the
 * compressed filter mode, and the rx_pkt.err_vec mask (narrower on T6+
 * when compressed error vectors are enabled).
 */
8266 struct tp_params *tpp = &adap->params.tp;
8268 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
8269 tpp->tre = G_TIMERRESOLUTION(v);
8270 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
8272 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8273 for (chan = 0; chan < MAX_NCHAN; chan++)
8274 tpp->tx_modq[chan] = chan;
8276 read_filter_mode_and_ingress_config(adap, sleep_ok);
8279 * Cache a mask of the bits that represent the error vector portion of
8280 * rx_pkt.err_vec. T6+ can use a compressed error vector to make room
8281 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
8283 tpp->err_vec_mask = htobe16(0xffff);
8284 if (chip_id(adap) > CHELSIO_T5) {
8285 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
8286 if (v & F_CRXPKTENC) {
8288 htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
8296 * t4_filter_field_shift - calculate filter field shift
8297 * @adap: the adapter
8298 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8300 * Return the shift position of a filter field within the Compressed
8301 * Filter Tuple. The filter field is specified via its selection bit
8302 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
8304 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
/*
 * Compute the bit offset of @filter_sel within the Compressed Filter
 * Tuple by summing the widths of every lower-order field enabled in the
 * cached filter mode (TP_VLAN_PRI_MAP).
 */
8306 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* Field not enabled in the filter mode: it has no position. */
8310 if ((filter_mode & filter_sel) == 0)
8313 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8314 switch (filter_mode & sel) {
8316 field_shift += W_FT_FCOE;
8319 field_shift += W_FT_PORT;
8322 field_shift += W_FT_VNIC_ID;
8325 field_shift += W_FT_VLAN;
8328 field_shift += W_FT_TOS;
8331 field_shift += W_FT_PROTOCOL;
8334 field_shift += W_FT_ETHERTYPE;
8337 field_shift += W_FT_MACMATCH;
8340 field_shift += W_FT_MPSHITTYPE;
8342 case F_FRAGMENTATION:
8343 field_shift += W_FT_FRAGMENTATION;
8350 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
/*
 * Initialize a port: map port_id to its physical port via the firmware
 * portvec, allocate the primary VI, derive the SMT index, and fetch RSS
 * base/size for the VI.
 */
8355 struct port_info *p = adap2pinfo(adap, port_id);
/* Find the (port_id+1)-th set bit in portvec: the physical port #. */
8358 for (i = 0, j = -1; i <= p->port_id; i++) {
8361 } while ((adap->params.portvec & (1 << j)) == 0);
8364 if (!(adap->flags & IS_VF) ||
8365 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
8366 t4_update_port_info(p);
8369 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
8373 p->vi[0].viid = ret;
/* SMT index derivation from VIID differs between T4/T5 and T6. */
8374 if (chip_id(adap) <= CHELSIO_T5)
8375 p->vi[0].smt_idx = (ret & 0x7f) << 1;
8377 p->vi[0].smt_idx = (ret & 0x7f);
8379 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
8381 p->vi[0].rss_size = rss_size;
8382 t4_os_set_hw_addr(p, addr);
8384 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8385 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
8386 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
8387 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
/* On query failure fall back to an invalid RSS base marker. */
8389 p->vi[0].rss_base = 0xffff;
8391 /* MPASS((val >> 16) == rss_size); */
8392 p->vi[0].rss_base = val & 0xffff;
8399 * t4_read_cimq_cfg - read CIM queue configuration
8400 * @adap: the adapter
8401 * @base: holds the queue base addresses in bytes
8402 * @size: holds the queue sizes in bytes
8403 * @thres: holds the queue full thresholds in bytes
8405 * Returns the current configuration of the CIM queues, starting with
8406 * the IBQs, then the OBQs.
/*
 * Reads base/size (and, for IBQs, full-threshold) of every CIM queue by
 * selecting each queue in CIM_QUEUE_CONFIG_REF and reading back CTRL.
 * NOTE(review): listing elides lines (V_QUENUMSELECT args, braces).
 */
8408 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8411 	int cim_num_obq = adap->chip_params->cim_num_obq;
/* Inbound queues first, as documented in the function header above. */
8413 	for (i = 0; i < CIM_NUM_IBQ; i++) {
8414 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8416 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8417 		/* value is in 256-byte units */
8418 		*base++ = G_CIMQBASE(v) * 256;
8419 		*size++ = G_CIMQSIZE(v) * 256;
8420 		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
/* Then the outbound queues; OBQ count is chip-dependent. */
8422 	for (i = 0; i < cim_num_obq; i++) {
8423 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8425 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8426 		/* value is in 256-byte units */
8427 		*base++ = G_CIMQBASE(v) * 256;
8428 		*size++ = G_CIMQSIZE(v) * 256;
8433 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8434 * @adap: the adapter
8435 * @qid: the queue index
8436 * @data: where to store the queue contents
8437 * @n: capacity of @data in 32-bit words
8439 * Reads the contents of the selected CIM queue starting at address 0 up
8440 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8441 * error and the number of 32-bit words actually read on success.
/*
 * Dumps a CIM inbound queue word-by-word through the IBQ debug interface,
 * polling IBQDBGBUSY between words.  NOTE(review): lines elided in listing
 * (error returns, attempts setup, braces).
 */
8443 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8445 	int i, err, attempts;
8447 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only 6 IBQs exist (0..5); @n must be a multiple of 4. */
8449 	if (qid > 5 || (n & 3))
8452 	addr = qid * nwords;
8456 	/* It might take 3-10ms before the IBQ debug read access is allowed.
8457 	 * Wait for 1 Sec with a delay of 1 usec.
8461 	for (i = 0; i < n; i++, addr++) {
8462 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8464 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8468 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable debug access when done. */
8470 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8475 * t4_read_cim_obq - read the contents of a CIM outbound queue
8476 * @adap: the adapter
8477 * @qid: the queue index
8478 * @data: where to store the queue contents
8479 * @n: capacity of @data in 32-bit words
8481 * Reads the contents of the selected CIM queue starting at address 0 up
8482 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8483 * error and the number of 32-bit words actually read on success.
/*
 * Dumps a CIM outbound queue via the OBQ debug interface.  Queue base/size
 * are fetched from CIM_QUEUE_CONFIG first.  NOTE(review): listing elides
 * lines (bounds clamp of n, error returns, braces).
 */
8485 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8488 	unsigned int addr, v, nwords;
8489 	int cim_num_obq = adap->chip_params->cim_num_obq;
8491 	if ((qid > (cim_num_obq - 1)) || (n & 3))
8494 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8495 		     V_QUENUMSELECT(qid));
8496 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8498 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
8499 	nwords = G_CIMQSIZE(v) * 64;  /* same */
8503 	for (i = 0; i < n; i++, addr++) {
8504 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8506 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8510 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
/* Disable debug access when done. */
8512 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Base offsets of sub-regions within the CIM internal address space.
 * NOTE(review): the "enum {" opener was elided from this listing. */
8518 	CIM_CTL_BASE = 0x2000,
8519 	CIM_PBT_ADDR_BASE = 0x2800,
8520 	CIM_PBT_LRF_BASE = 0x3000,
8521 	CIM_PBT_DATA_BASE = 0x3800
8525 * t4_cim_read - read a block from CIM internal address space
8526 * @adap: the adapter
8527 * @addr: the start address within the CIM address space
8528 * @n: number of words to read
8529 * @valp: where to store the result
8531 * Reads a block of 4-byte words from the CIM intenal address space.
/*
 * Reads @n 4-byte words from CIM internal space starting at @addr by
 * writing the address to HOST_ACC_CTRL and polling HOSTBUSY per word.
 * NOTE(review): ret declaration, -EBUSY return and closing brace elided.
 */
8533 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if a prior host access is still in flight. */
8538 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8541 	for ( ; !ret && n--; addr += 4) {
8542 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8543 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8546 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8552 * t4_cim_write - write a block into CIM internal address space
8553 * @adap: the adapter
8554 * @addr: the start address within the CIM address space
8555 * @n: number of words to write
8556 * @valp: set of values to write
8558 * Writes a block of 4-byte words into the CIM intenal address space.
/*
 * Writes @n 4-byte words into CIM internal space: data first, then the
 * address with HOSTWRITE set, polling HOSTBUSY per word.
 * NOTE(review): ret declaration and tail of the function elided in listing.
 */
8560 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8561 		 const unsigned int *valp)
/* Bail out if a prior host access is still in flight. */
8565 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8568 	for ( ; !ret && n--; addr += 4) {
8569 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8570 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8571 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single word to CIM internal space. */
8577 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8580 	return t4_cim_write(adap, addr, 1, &val);
8584 * t4_cim_ctl_read - read a block from CIM control region
8585 * @adap: the adapter
8586 * @addr: the start address within the CIM control region
8587 * @n: number of words to read
8588 * @valp: where to store the result
8590 * Reads a block of 4-byte words from the CIM control region.
/* Reads from the CIM control region by offsetting into CIM_CTL_BASE. */
8592 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8595 	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8599 * t4_cim_read_la - read CIM LA capture buffer
8600 * @adap: the adapter
8601 * @la_buf: where to store the LA data
8602 * @wrptr: the HW write pointer within the capture buffer
8604 * Reads the contents of the CIM LA buffer with the most recent entry at
8605 * the end of the returned data and with the entry at @wrptr first.
8606 * We try to leave the LA in the running state we find it in.
/*
 * Freezes the CIM logic analyzer (if running), walks its capture buffer
 * entry-by-entry via UP_DBG_LA_CFG/DATA, then restores the prior enable
 * state.  NOTE(review): listing elides lines (error returns, wrptr store,
 * loop braces); code kept byte-identical.
 */
8608 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8611 	unsigned int cfg, val, idx;
8613 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8617 	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
8618 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8623 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8627 	idx = G_UPDBGLAWRPTR(val);
8631 	for (i = 0; i < adap->params.cim_la_size; i++) {
8632 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8633 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8636 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Hardware clears RDEN when the read completes; still set = not ready. */
8639 		if (val & F_UPDBGLARDEN) {
8643 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8647 		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8648 		idx = (idx + 1) & M_UPDBGLARDPTR;
8650 		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8651 		 * identify the 32-bit portion of the full 312-bit data
8654 		while ((idx & 0xf) > 9)
8655 			idx = (idx + 1) % M_UPDBGLARDPTR;
/* Restore the LA to its original running state. */
8658 	if (cfg & F_UPDBGLAEN) {
8659 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8660 				      cfg & ~F_UPDBGLARDEN);
8668 * t4_tp_read_la - read TP LA capture buffer
8669 * @adap: the adapter
8670 * @la_buf: where to store the LA data
8671 * @wrptr: the HW write pointer within the capture buffer
8673 * Reads the contents of the TP LA buffer with the most recent entry at
8674 * the end of the returned data and with the entry at @wrptr first.
8675 * We leave the LA in the running state we find it in.
/*
 * Freezes the TP logic analyzer, reads TPLA_SIZE 64-bit entries starting at
 * the hardware write pointer, invalidates a trailing half-entry, then
 * restores the running state.  NOTE(review): listing elides lines (wrptr
 * store, loop braces).
 */
8677 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8679 	bool last_incomplete;
8680 	unsigned int i, cfg, val, idx;
8682 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8683 	if (cfg & F_DBGLAENABLE)		/* freeze LA */
8684 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8685 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8687 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8688 	idx = G_DBGLAWPTR(val);
/* In wide-capture modes a half-written entry may be pending. */
8689 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8690 	if (last_incomplete)
8691 		idx = (idx + 1) & M_DBGLARPTR;
8696 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
8697 	val |= adap->params.tp.la_mask;
8699 	for (i = 0; i < TPLA_SIZE; i++) {
8700 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8701 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8702 		idx = (idx + 1) & M_DBGLARPTR;
8705 	/* Wipe out last entry if it isn't valid */
8706 	if (last_incomplete)
8707 		la_buf[TPLA_SIZE - 1] = ~0ULL;
8709 	if (cfg & F_DBGLAENABLE)		/* restore running state */
8710 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8711 			     cfg | adap->params.tp.la_mask);
8715 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8716 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8717 * state for more than the Warning Threshold then we'll issue a warning about
8718 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8719 * appears to be hung every Warning Repeat second till the situation clears.
8720 * If the situation clears, we'll note that as well.
/* Hang-detect tunables (seconds): warn after THRESH, repeat every REPEAT. */
8722 #define SGE_IDMA_WARN_THRESH 1
8723 #define SGE_IDMA_WARN_REPEAT 300
8726 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8727 * @adapter: the adapter
8728 * @idma: the adapter IDMA Monitor state
8730 * Initialize the state of an SGE Ingress DMA Monitor.
8732 void t4_idma_monitor_init(struct adapter *adapter,
8733 			  struct sge_idma_monitor_state *idma)
8735 	/* Initialize the state variables for detecting an SGE Ingress DMA
8736 	 * hang. The SGE has internal counters which count up on each clock
8737 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
8738 	 * same state they were on the previous clock tick. The clock used is
8739 	 * the Core Clock so we have a limit on the maximum "time" they can
8740 	 * record; typically a very small number of seconds. For instance,
8741 	 * with a 600MHz Core Clock, we can only count up to a bit more than
8742 	 * 7s. So we'll synthesize a larger counter in order to not run the
8743 	 * risk of having the "timers" overflow and give us the flexibility to
8744 	 * maintain a Hung SGE State Machine of our own which operates across
8745 	 * a longer time frame.
/* Number of core ticks in one second, used as the stall threshold unit. */
8747 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8748 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8752 * t4_idma_monitor - monitor SGE Ingress DMA state
8753 * @adapter: the adapter
8754 * @idma: the adapter IDMA Monitor state
8755 * @hz: number of ticks/second
8756 * @ticks: number of ticks since the last IDMA Monitor call
/*
 * Periodic hang monitor for the two SGE Ingress DMA channels: reads the
 * same-state counters, maintains a synthesized per-channel stall timer in
 * the Hz domain, and emits rate-limited warnings while a channel is stuck.
 * NOTE(review): listing elides lines (continue statements, braces,
 * debug11 argument in the final CH_WARN).
 */
8758 void t4_idma_monitor(struct adapter *adapter,
8759 		     struct sge_idma_monitor_state *idma,
8762 	int i, idma_same_state_cnt[2];
8764 	/* Read the SGE Debug Ingress DMA Same State Count registers. These
8765 	 * are counters inside the SGE which count up on each clock when the
8766 	 * SGE finds its Ingress DMA State Engines in the same states they
8767 	 * were in the previous clock. The counters will peg out at
8768 	 * 0xffffffff without wrapping around so once they pass the 1s
8769 	 * threshold they'll stay above that till the IDMA state changes.
8771 	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8772 	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8773 	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8775 	for (i = 0; i < 2; i++) {
8776 		u32 debug0, debug11;
8778 		/* If the Ingress DMA Same State Counter ("timer") is less
8779 		 * than 1s, then we can reset our synthesized Stall Timer and
8780 		 * continue. If we have previously emitted warnings about a
8781 		 * potential stalled Ingress Queue, issue a note indicating
8782 		 * that the Ingress Queue has resumed forward progress.
8784 		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8785 			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8786 				CH_WARN(adapter, "SGE idma%d, queue %u, "
8787 					"resumed after %d seconds\n",
8788 					i, idma->idma_qid[i],
8789 					idma->idma_stalled[i]/hz);
8790 			idma->idma_stalled[i] = 0;
8794 		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8795 		 * domain. The first time we get here it'll be because we
8796 		 * passed the 1s Threshold; each additional time it'll be
8797 		 * because the RX Timer Callback is being fired on its regular
8800 		 * If the stall is below our Potential Hung Ingress Queue
8801 		 * Warning Threshold, continue.
8803 		if (idma->idma_stalled[i] == 0) {
8804 			idma->idma_stalled[i] = hz;
8805 			idma->idma_warn[i] = 0;
8807 			idma->idma_stalled[i] += ticks;
8808 			idma->idma_warn[i] -= ticks;
8811 		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8814 		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8816 		if (idma->idma_warn[i] > 0)
/* Re-arm the warning rate limiter for this channel. */
8818 		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8820 		/* Read and save the SGE IDMA State and Queue ID information.
8821 		 * We do this every time in case it changes across time ...
8822 		 * can't be too careful ...
8824 		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8825 		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8826 		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8828 		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8829 		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8830 		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8832 		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8833 			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8834 			i, idma->idma_qid[i], idma->idma_state[i],
8835 			idma->idma_stalled[i]/hz,
8837 		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8842 * t4_read_pace_tbl - read the pace table
8843 * @adap: the adapter
8844 * @pace_vals: holds the returned values
8846 * Returns the values of TP's pace table in microseconds.
8848 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
/* Selecting entry i via the high bits makes the register return that entry. */
8852 	for (i = 0; i < NTX_SCHED; i++) {
8853 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8854 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
/* Convert from delayed-ACK ticks to microseconds for the caller. */
8855 		pace_vals[i] = dack_ticks_to_usec(adap, v);
8860 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8861 * @adap: the adapter
8862 * @sched: the scheduler index
8863 * @kbps: the byte rate in Kbps
8864 * @ipg: the interpacket delay in tenths of nanoseconds
8866 * Return the current configuration of a HW Tx scheduler.
/*
 * Reads rate-limit (bits-per-tick / clocks-per-tick) and inter-packet gap
 * for scheduler @sched from TP TM PIO space.  Two schedulers share each
 * register; odd/even selection lines are elided from this listing.
 */
8868 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8869 		     unsigned int *ipg, bool sleep_ok)
8871 	unsigned int v, addr, bpt, cpt;
8874 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8875 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
8878 		bpt = (v >> 8) & 0xff;
8881 			*kbps = 0;	/* scheduler disabled */
8883 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8884 			*kbps = (v * bpt) / 125;
8888 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8889 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
/* Convert core-clock ticks to tenths of nanoseconds. */
8893 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
8898 * t4_load_cfg - download config file
8899 * @adap: the adapter
8900 * @cfg_data: the cfg text file to write
8901 * @size: text file size
8903 * Write the supplied config text file to the card's serial flash.
/*
 * Erases the FLASH config-file sectors and, if @size > 0, writes the
 * supplied config file one SF_PAGE_SIZE page at a time.  @size == 0 means
 * "just erase".  NOTE(review): listing elides lines (addr assignment,
 * error-return paths, braces).
 */
8905 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8907 	int ret, i, n, cfg_addr;
8909 	unsigned int flash_cfg_start_sec;
8910 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8912 	cfg_addr = t4_flash_cfg_addr(adap);
8917 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
8919 	if (size > FLASH_CFG_MAX_SIZE) {
8920 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8921 		       FLASH_CFG_MAX_SIZE);
8925 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
8927 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8928 				     flash_cfg_start_sec + i - 1);
8930 	 * If size == 0 then we're simply erasing the FLASH sectors associated
8931 	 * with the on-adapter Firmware Configuration File.
8933 	if (ret || size == 0)
8936 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
8937 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
/* Final partial page: write only the remaining bytes. */
8938 		if ( (size - i) < SF_PAGE_SIZE)
8942 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8946 		addr += SF_PAGE_SIZE;
8947 		cfg_data += SF_PAGE_SIZE;
8952 		CH_ERR(adap, "config file %s failed %d\n",
8953 		       (size == 0 ? "clear" : "download"), ret);
8958 * t5_fw_init_extern_mem - initialize the external memory
8959 * @adap: the adapter
8961 * Initializes the external memory on T5.
/*
 * Asks the firmware to initialize all external memory controllers on T5
 * via the DEV_MCINIT parameter; uses the maximum mailbox timeout because
 * MC init is slow.
 */
8963 int t5_fw_init_extern_mem(struct adapter *adap)
8965 	u32 params[1], val[1];
8971 	val[0] = 0xff; /* Initialize all MCs */
8972 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8973 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8974 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8975 			FW_CMD_MAX_TIMEOUT);
8980 /* BIOS boot headers */
/* Generic PCI expansion ROM header: signature + offset to the PCIR data. */
8981 typedef struct pci_expansion_rom_header {
8982 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
8983 	u8	reserved[22]; /* Reserved per processor Architecture data */
8984 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
8985 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8987 /* Legacy PCI Expansion ROM Header */
8988 typedef struct legacy_pci_expansion_rom_header {
8989 	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
8990 	u8	size512; /* Current Image Size in units of 512 bytes */
8991 	u8	initentry_point[4];
8992 	u8	cksum; /* Checksum computed on the entire Image */
8993 	u8	reserved[16]; /* Reserved */
8994 	u8	pcir_offset[2]; /* Offset to PCI Data Struture */
8995 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8997 /* EFI PCI Expansion ROM Header */
8998 typedef struct efi_pci_expansion_rom_header {
8999 	u8	signature[2]; // ROM signature. The value 0xaa55
9000 	u8	initialization_size[2]; /* Units 512. Includes this header */
9001 	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
9002 	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
9003 	u8	efi_machine_type[2]; /* Machine type from EFI image header */
9004 	u8	compression_type[2]; /* Compression type. */
9006 		 * Compression type definition
9009 		 * 0x2-0xFFFF: Reserved
9011 	u8	reserved[8]; /* Reserved */
9012 	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
9013 	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
9014 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
9016 /* PCI Data Structure Format */
9017 typedef struct pcir_data_structure { /* PCI Data Structure */
9018 	u8	signature[4]; /* Signature. The string "PCIR" */
9019 	u8	vendor_id[2]; /* Vendor Identification */
9020 	u8	device_id[2]; /* Device Identification */
9021 	u8	vital_product[2]; /* Pointer to Vital Product Data */
9022 	u8	length[2]; /* PCIR Data Structure Length */
9023 	u8	revision; /* PCIR Data Structure Revision */
9024 	u8	class_code[3]; /* Class Code */
9025 	u8	image_length[2]; /* Image Length. Multiple of 512B */
9026 	u8	code_revision[2]; /* Revision Level of Code/Data */
9027 	u8	code_type; /* Code Type. */
9029 		 * PCI Expansion ROM Code Types
9030 		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
9031 		 * 0x01: Open Firmware standard for PCI. FCODE
9032 		 * 0x02: Hewlett-Packard PA RISC. HP reserved
9033 		 * 0x03: EFI Image. EFI
9034 		 * 0x04-0xFF: Reserved.
9036 	u8	indicator; /* Indicator. Identifies the last image in the ROM */
9037 	u8	reserved[2]; /* Reserved */
9038 } pcir_data_t; /* PCI__DATA_STRUCTURE */
9040 /* BOOT constants */
/* Option-ROM layout constants.  NOTE(review): "enum {" opener elided. */
9042 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
9043 	BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
9044 	BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
9045 	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
9046 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
9047 	VENDOR_ID = 0x1425, /* Vendor ID */
9048 	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
9052 * modify_device_id - Modifies the device ID of the Boot BIOS image
9053 * @adatper: the device ID to write.
9054 * @boot_data: the boot image to modify.
9056 * Write the supplied device ID to the boot BIOS image.
/*
 * Walks every chained image in an option-ROM blob, patching the PCI device
 * ID in each PCIR structure (legacy and EFI code types only) and fixing up
 * the legacy image checksum.  NOTE(review): listing elides lines (loop
 * opener, local declarations, braces).
 */
9058 static void modify_device_id(int device_id, u8 *boot_data)
9060 	legacy_pci_exp_rom_header_t *header;
9061 	pcir_data_t *pcir_header;
9065 	 * Loop through all chained images and change the device ID's
9068 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
9069 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
9070 		    le16_to_cpu(*(u16*)header->pcir_offset)];
9073 		 * Only modify the Device ID if code type is Legacy or HP.
9074 		 * 0x00: Okay to modify
9075 		 * 0x01: FCODE. Do not be modify
9076 		 * 0x03: Okay to modify
9077 		 * 0x04-0xFF: Do not modify
9079 		if (pcir_header->code_type == 0x00) {
9084 			 * Modify Device ID to match current adatper
9086 			*(u16*) pcir_header->device_id = device_id;
9089 			 * Set checksum temporarily to 0.
9090 			 * We will recalculate it later.
9092 			header->cksum = 0x0;
9095 			 * Calculate and update checksum
9097 			for (i = 0; i < (header->size512 * 512); i++)
9098 				csum += (u8)boot_data[cur_header + i];
9101 			 * Invert summed value to create the checksum
9102 			 * Writing new checksum value directly to the boot data
/* Byte 7 of the legacy header is the cksum field patched above. */
9104 			boot_data[cur_header + 7] = -csum;
9106 		} else if (pcir_header->code_type == 0x03) {
9109 			 * Modify Device ID to match current adatper
9111 			*(u16*) pcir_header->device_id = device_id;
9117 		 * Check indicator element to identify if this is the last
9120 		if (pcir_header->indicator & 0x80)
9124 		 * Move header pointer up to the next image in the ROM.
9126 		cur_header += header->size512 * 512;
9131 * t4_load_boot - download boot flash
9132 * @adapter: the adapter
9133 * @boot_data: the boot image to write
9134 * @boot_addr: offset in flash to write boot_data
9137 * Write the supplied boot image to the card's serial flash.
9138 * The boot image has the following sections: a 28-byte header and the
/*
 * Erases the option-ROM sectors and writes a boot image, validating the ROM
 * and PCIR signatures and vendor ID, patching the device ID when needed,
 * and writing the first page (the header) last so a partial download never
 * leaves a valid-looking but truncated image.  NOTE(review): listing elides
 * lines (returns, addr setup, braces); code kept byte-identical.
 */
9141 int t4_load_boot(struct adapter *adap, u8 *boot_data,
9142 		 unsigned int boot_addr, unsigned int size)
9144 	pci_exp_rom_header_t *header;
9146 	pcir_data_t *pcir_header;
9150 	unsigned int boot_sector = (boot_addr * 1024 );
9151 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9154 	 * Make sure the boot image does not encroach on the firmware region
9156 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
9157 		CH_ERR(adap, "boot image encroaching on firmware region\n");
9162 	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
9163 	 * and Boot configuration data sections. These 3 boot sections span
9164 	 * sectors 0 to 7 in flash and live right before the FW image location.
9166 	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
9168 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
9169 				     (boot_sector >> 16) + i - 1);
9172 	 * If size == 0 then we're simply erasing the FLASH sectors associated
9173 	 * with the on-adapter option ROM file
9175 	if (ret || (size == 0))
9178 	/* Get boot header */
9179 	header = (pci_exp_rom_header_t *)boot_data;
9180 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
9181 	/* PCIR Data Structure */
9182 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
9185 	 * Perform some primitive sanity testing to avoid accidentally
9186 	 * writing garbage over the boot sectors. We ought to check for
9187 	 * more but it's not worth it for now ...
9189 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
9190 		CH_ERR(adap, "boot image too small/large\n");
9194 #ifndef CHELSIO_T4_DIAGS
9196 	 * Check BOOT ROM header signature
9198 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
9199 		CH_ERR(adap, "Boot image missing signature\n");
9204 	 * Check PCI header signature
9206 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
9207 		CH_ERR(adap, "PCI header missing signature\n");
9212 	 * Check Vendor ID matches Chelsio ID
9214 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
9215 		CH_ERR(adap, "Vendor ID missing signature\n");
9221 	 * Retrieve adapter's device ID
9223 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
9224 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
9225 	device_id = device_id & 0xf0ff;
9228 	 * Check PCIE Device ID
9230 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
9232 		 * Change the device ID in the Boot BIOS image to match
9233 		 * the Device ID of the current adapter.
9235 		modify_device_id(device_id, boot_data);
9239 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
9240 	 * we finish copying the rest of the boot image. This will ensure
9241 	 * that the BIOS boot header will only be written if the boot image
9242 	 * was written in full.
9245 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
9246 		addr += SF_PAGE_SIZE;
9247 		boot_data += SF_PAGE_SIZE;
9248 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* Header page written last, only after the body landed successfully. */
9253 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
9254 			     (const u8 *)header, 0);
9258 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
9263 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9264 * @adapter: the adapter
9266 * Return the address within the flash where the OptionROM Configuration
9267 * is stored, or an error if the device FLASH is too small to contain
9268 * a OptionROM Configuration.
9270 static int t4_flash_bootcfg_addr(struct adapter *adapter)
9273 	 * If the device FLASH isn't large enough to hold a Firmware
9274 	 * Configuration File, return an error.
9276 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
/* FLASH is big enough: the OptionROM config lives at its fixed offset. */
9279 	return FLASH_BOOTCFG_START;
/*
 * Erases the OptionROM-config sectors and, if @size > 0, writes the boot
 * configuration one SF_PAGE_SIZE page at a time (mirrors t4_load_cfg).
 * NOTE(review): listing elides lines (addr assignment, returns, braces).
 */
9282 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
9284 	int ret, i, n, cfg_addr;
9286 	unsigned int flash_cfg_start_sec;
9287 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9289 	cfg_addr = t4_flash_bootcfg_addr(adap);
9294 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
9296 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
9297 		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9298 			FLASH_BOOTCFG_MAX_SIZE);
9302 	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9304 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9305 			flash_cfg_start_sec + i - 1);
9308 	 * If size == 0 then we're simply erasing the FLASH sectors associated
9309 	 * with the on-adapter OptionROM Configuration File.
9311 	if (ret || size == 0)
9314 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
9315 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
/* Final partial page: write only the remaining bytes. */
9316 		if ( (size - i) < SF_PAGE_SIZE)
9320 		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9324 		addr += SF_PAGE_SIZE;
9325 		cfg_data += SF_PAGE_SIZE;
9330 		CH_ERR(adap, "boot config data %s failed %d\n",
9331 				(size == 0 ? "clear" : "download"), ret);
9336 * t4_set_filter_mode - configure the optional components of filter tuples
9337 * @adap: the adapter
9338 * @mode_map: a bitmap selcting which optional filter components to enable
9339 * @sleep_ok: if true we may sleep while awaiting command completion
9341 * Sets the filter mode by selecting the optional components to enable
9342 * in filter tuples. Returns 0 on success and a negative error if the
9343 * requested mode needs more bits than are available for optional
/*
 * Programs TP_VLAN_PRI_MAP after verifying the selected optional fields fit
 * within FILTER_OPT_LEN bits; width[] lists each field's bit width in
 * selection-bit order (F_FCOE..F_FRAGMENTATION).
 */
9346 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
9349 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Sum widths of all requested fields; summation line elided in listing. */
9353 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9354 		if (mode_map & (1 << i))
9356 	if (nbits > FILTER_OPT_LEN)
9358 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
/* Refresh the cached copy so software agrees with the new hardware mode. */
9359 	read_filter_mode_and_ingress_config(adap, sleep_ok);
9365 * t4_clr_port_stats - clear port statistics
9366 * @adap: the adapter
9367 * @idx: the port index
9369 * Clear HW statistics for the given port.
/*
 * Zeroes all MPS TX/RX statistics registers for port @idx, plus the
 * per-buffer-group drop/truncate counters for groups mapped to the port.
 * NOTE(review): chip-version test around the PORT_BASE selection elided.
 */
9371 void t4_clr_port_stats(struct adapter *adap, int idx)
9374 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
9378 		port_base_addr = PORT_BASE(idx);
9380 		port_base_addr = T5_PORT_BASE(idx);
/* Stat registers are 64-bit (L/H pairs), hence the stride of 8. */
9382 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9383 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9384 		t4_write_reg(adap, port_base_addr + i, 0);
9385 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9386 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9387 		t4_write_reg(adap, port_base_addr + i, 0);
9388 	for (i = 0; i < 4; i++)
9389 		if (bgmap & (1 << i)) {
9391 			    A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9393 			    A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9398 * t4_i2c_rd - read I2C data from adapter
9399 * @adap: the adapter
9400 * @port: Port number if per-port device; <0 if not
9401 * @devid: per-port device ID or absolute device ID
9402 * @offset: byte offset into device I2C space
9403 * @len: byte length of I2C space data
9404 * @buf: buffer in which to return I2C data
9406 * Reads the I2C data from the indicated device and location.
/*
 * Reads @len bytes from an I2C device through a firmware LDST command;
 * the result is copied out of the command reply on success.
 * NOTE(review): listing elides lines (length validation, flag OR-ins).
 */
9408 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9409 	      int port, unsigned int devid,
9410 	      unsigned int offset, unsigned int len,
9414 	struct fw_ldst_cmd ldst;
9420 	    len > sizeof ldst.u.i2c.data)
9423 	memset(&ldst, 0, sizeof ldst);
9424 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9425 	ldst.op_to_addrspace =
9426 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9430 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* 0xff selects a non-port-specific (absolute) device. */
9431 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9432 	ldst.u.i2c.did = devid;
9433 	ldst.u.i2c.boffset = offset;
9434 	ldst.u.i2c.blen = len;
9435 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9437 		memcpy(buf, ldst.u.i2c.data, len);
9442 * t4_i2c_wr - write I2C data to adapter
9443 * @adap: the adapter
9444 * @port: Port number if per-port device; <0 if not
9445 * @devid: per-port device ID or absolute device ID
9446 * @offset: byte offset into device I2C space
9447 * @len: byte length of I2C space data
9448 * @buf: buffer containing new I2C data
9450 * Write the I2C data to the indicated device and location.
/*
 * Writes @len bytes to an I2C device through a firmware LDST command;
 * mirror image of t4_i2c_rd with the payload copied into the command.
 * NOTE(review): listing elides lines (length validation, flag OR-ins).
 */
9452 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9453 	      int port, unsigned int devid,
9454 	      unsigned int offset, unsigned int len,
9458 	struct fw_ldst_cmd ldst;
9463 	    len > sizeof ldst.u.i2c.data)
9466 	memset(&ldst, 0, sizeof ldst);
9467 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9468 	ldst.op_to_addrspace =
9469 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9473 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* 0xff selects a non-port-specific (absolute) device. */
9474 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9475 	ldst.u.i2c.did = devid;
9476 	ldst.u.i2c.boffset = offset;
9477 	ldst.u.i2c.blen = len;
9478 	memcpy(ldst.u.i2c.data, buf, len);
9479 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9483 * t4_sge_ctxt_rd - read an SGE context through FW
9484 * @adap: the adapter
9485 * @mbox: mailbox to use for the FW command
9486 * @cid: the context id
9487 * @ctype: the context type
9488 * @data: where to store the context data
9490 * Issues a FW command through the given mailbox to read an SGE context.
/*
 * Reads one SGE context (egress/ingress/FLM/CONM) through a firmware LDST
 * command and unpacks the six 32-bit context words into @data.
 * NOTE(review): success test before the unpack elided in listing.
 */
9492 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9493 		   enum ctxt_type ctype, u32 *data)
9496 	struct fw_ldst_cmd c;
/* Map the context type to the matching LDST address space. */
9498 	if (ctype == CTXT_EGRESS)
9499 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
9500 	else if (ctype == CTXT_INGRESS)
9501 		ret = FW_LDST_ADDRSPC_SGE_INGC;
9502 	else if (ctype == CTXT_FLM)
9503 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9505 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
9507 	memset(&c, 0, sizeof(c));
9508 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9509 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
9510 					V_FW_LDST_CMD_ADDRSPACE(ret));
9511 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9512 	c.u.idctxt.physid = cpu_to_be32(cid);
9514 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9516 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9517 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9518 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9519 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9520 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9521 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9527 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9528 * @adap: the adapter
9529 * @cid: the context id
9530 * @ctype: the context type
9531 * @data: where to store the context data
9533 * Reads an SGE context directly, bypassing FW. This is only for
9534 * debugging when FW is unavailable.
9536 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* Kick off the context read directly in SGE_CTXT_CMD, then poll BUSY. */
9541 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9542 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9544 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9545 			*data++ = t4_read_reg(adap, i);
/* Issues an FW_SCHED_CMD CONFIG sub-command to set scheduler min/max enable. */
9549 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9552 	struct fw_sched_cmd cmd;
9554 	memset(&cmd, 0, sizeof(cmd));
9555 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9558 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9560 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9561 	cmd.u.config.type = type;
9562 	cmd.u.config.minmaxen = minmaxen;
9564 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9568 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9569 int rateunit, int ratemode, int channel, int cl,
9570 int minrate, int maxrate, int weight, int pktsize,
9573 struct fw_sched_cmd cmd;
9575 memset(&cmd, 0, sizeof(cmd));
9576 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9579 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9581 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9582 cmd.u.params.type = type;
9583 cmd.u.params.level = level;
9584 cmd.u.params.mode = mode;
9585 cmd.u.params.ch = channel;
9586 cmd.u.params.cl = cl;
9587 cmd.u.params.unit = rateunit;
9588 cmd.u.params.rate = ratemode;
9589 cmd.u.params.min = cpu_to_be32(minrate);
9590 cmd.u.params.max = cpu_to_be32(maxrate);
9591 cmd.u.params.weight = cpu_to_be16(weight);
9592 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9594 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9598 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
9599 unsigned int maxrate, int sleep_ok)
9601 struct fw_sched_cmd cmd;
9603 memset(&cmd, 0, sizeof(cmd));
9604 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9607 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9609 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9610 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9611 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
9612 cmd.u.params.ch = channel;
9613 cmd.u.params.rate = ratemode; /* REL or ABS */
9614 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
9616 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9620 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
9621 int weight, int sleep_ok)
9623 struct fw_sched_cmd cmd;
9625 if (weight < 0 || weight > 100)
9628 memset(&cmd, 0, sizeof(cmd));
9629 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9632 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9634 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9635 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9636 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
9637 cmd.u.params.ch = channel;
9638 cmd.u.params.cl = cl;
9639 cmd.u.params.weight = cpu_to_be16(weight);
9641 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9645 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
9646 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
9648 struct fw_sched_cmd cmd;
9650 memset(&cmd, 0, sizeof(cmd));
9651 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9654 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9656 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9657 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9658 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
9659 cmd.u.params.mode = mode;
9660 cmd.u.params.ch = channel;
9661 cmd.u.params.cl = cl;
9662 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
9663 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
9664 cmd.u.params.max = cpu_to_be32(maxrate);
9665 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9667 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9672 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9673 * @adapter: the adapter
9674 * @mbox: mailbox to use for the FW command
9675 * @pf: the PF owning the queue
9676 * @vf: the VF owning the queue
9677 * @timeout: watchdog timeout in ms
9678 * @action: watchdog timer / action
9680 * There are separate watchdog timers for each possible watchdog
9681 * action. Configure one of the watchdog timers by setting a non-zero
9682 * timeout. Disable a watchdog timer by using a timeout of zero.
9684 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9685 unsigned int pf, unsigned int vf,
9686 unsigned int timeout, unsigned int action)
9688 struct fw_watchdog_cmd wdog;
9692 * The watchdog command expects a timeout in units of 10ms so we need
9693 * to convert it here (via rounding) and force a minimum of one 10ms
9694 * "tick" if the timeout is non-zero but the convertion results in 0
9697 ticks = (timeout + 5)/10;
9698 if (timeout && !ticks)
9701 memset(&wdog, 0, sizeof wdog);
9702 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9705 V_FW_PARAMS_CMD_PFN(pf) |
9706 V_FW_PARAMS_CMD_VFN(vf));
9707 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9708 wdog.timeout = cpu_to_be32(ticks);
9709 wdog.action = cpu_to_be32(action);
9711 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9714 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9716 struct fw_devlog_cmd devlog_cmd;
9719 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9720 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9721 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9722 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9723 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9724 sizeof(devlog_cmd), &devlog_cmd);
9728 *level = devlog_cmd.level;
9732 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9734 struct fw_devlog_cmd devlog_cmd;
9736 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9737 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9740 devlog_cmd.level = level;
9741 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9742 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9743 sizeof(devlog_cmd), &devlog_cmd);