2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
/* Sleep for roughly @x milliseconds using the FreeBSD pause(9) primitive. */
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
/* !! collapses the masked field to 0/1 so it can compare against @polarity. */
66 if (!!(val & mask) == polarity) {
/* Same as t4_wait_op_done_val() but discards the final register value. */
78 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
79 int polarity, int attempts, int delay)
81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
86 * t4_set_reg_field - set a register field to a value
87 * @adapter: the adapter to program
88 * @addr: the register address
89 * @mask: specifies the portion of the register to modify
90 * @val: the new value for the register field
92 * Sets a register field specified by the supplied mask to the
95 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the masked field, then OR in the new value. */
98 u32 v = t4_read_reg(adapter, addr) & ~mask;
100 t4_write_reg(adapter, addr, v | val);
101 (void) t4_read_reg(adapter, addr); /* flush */
105 * t4_read_indirect - read indirectly addressed registers
107 * @addr_reg: register holding the indirect address
108 * @data_reg: register holding the value of the indirect register
109 * @vals: where the read register values are stored
110 * @nregs: how many indirect registers to read
111 * @start_idx: index of first indirect register to read
113 * Reads registers that are accessed indirectly through an address/data
116 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117 unsigned int data_reg, u32 *vals,
118 unsigned int nregs, unsigned int start_idx)
/* Select the indirect register via the address reg, then latch its value. */
121 t4_write_reg(adap, addr_reg, start_idx);
122 *vals++ = t4_read_reg(adap, data_reg);
128 * t4_write_indirect - write indirectly addressed registers
130 * @addr_reg: register holding the indirect addresses
131 * @data_reg: register holding the value for the indirect registers
132 * @vals: values to write
133 * @nregs: how many indirect registers to write
134 * @start_idx: address of first indirect register to write
136 * Writes a sequential block of registers that are accessed indirectly
137 * through an address/data register pair.
139 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140 unsigned int data_reg, const u32 *vals,
141 unsigned int nregs, unsigned int start_idx)
/* Program the target index, then write the corresponding value. */
144 t4_write_reg(adap, addr_reg, start_idx++);
145 t4_write_reg(adap, data_reg, *vals++);
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism. This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 * the backdoor registers on a regular basis and we can end up
157 * conflicting with its uses!
159 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
161 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
/* NOTE(review): presumably adjusts the request encoding for pre-T6 chips
 * in the elided branch body -- confirm against the full source. */
164 if (chip_id(adap) <= CHELSIO_T5)
172 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
173 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
176 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
177 * Configuration Space read. (None of the other fields matter when
178 * F_ENABLE is 0 so a simple register write is easier than a
179 * read-modify-write via t4_set_reg_field().)
181 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 * t4_report_fw_error - report firmware error
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
194 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by the G_PCIE_FW_EVAL() field of A_PCIE_FW. */
196 static const char *const reason[] = {
197 "Crash", /* PCIE_FW_EVAL_CRASH */
198 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
199 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
200 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
201 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
203 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
204 "Reserved", /* reserved */
208 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209 if (pcie_fw & F_PCIE_FW_ERR)
210 CH_ERR(adap, "Firmware reports adapter error: %s\n",
211 reason[G_PCIE_FW_EVAL(pcie_fw)]);
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
217 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits out of the mailbox, converting each to BE. */
220 for ( ; nflit; nflit--, mbox_addr += 8)
221 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
225 * Handle a FW assertion reported in a mailbox.
227 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
/* Log the firmware-reported assertion location and its two values. */
230 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231 asrt->u.assert.filename_0_7,
232 be32_to_cpu(asrt->u.assert.line),
233 be32_to_cpu(asrt->u.assert.x),
234 be32_to_cpu(asrt->u.assert.y));
/* Value read back from the mailbox control register when it isn't accessible. */
237 #define X_CIM_PF_NOACCESS 0xeeeeeeee
239 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
241 * @mbox: index of the mailbox to use
242 * @cmd: the command to write
243 * @size: command length in bytes
244 * @rpl: where to optionally store the reply
245 * @sleep_ok: if true we may sleep while awaiting command completion
246 * @timeout: time to wait for command to finish before timing out
247 * (negative implies @sleep_ok=false)
249 * Sends the given command to FW through the selected mailbox and waits
250 * for the FW to execute the command. If @rpl is not %NULL it is used to
251 * store the FW's reply to the command. The command and its optional
252 * reply are of the same length. Some FW commands like RESET and
253 * INITIALIZE can take a considerable amount of time to execute.
254 * @sleep_ok determines whether we may sleep while awaiting the response.
255 * If sleeping is allowed we use progressive backoff otherwise we spin.
256 * Note that passing in a negative @timeout is an alternate mechanism
257 * for specifying @sleep_ok=false. This is useful when a higher level
258 * interface allows for specification of @timeout but not @sleep_ok ...
260 * The return value is 0 on success or a negative errno on failure. A
261 * failure can happen either because we are not able to execute the
262 * command or FW executes it but signals an error. In the latter case
263 * the return value is the error code indicated by FW (negated).
265 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
266 int size, void *rpl, bool sleep_ok, int timeout)
269 * We delay in small increments at first in an effort to maintain
270 * responsiveness for simple, fast executing commands but then back
271 * off to larger delays to a maximum retry delay.
273 static const int delay[] = {
274 1, 1, 3, 5, 10, 10, 20, 50, 100
278 int i, ms, delay_idx, ret;
279 const __be64 *p = cmd;
280 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
281 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
283 __be64 cmd_rpl[MBOX_LEN/8];
/* Commands must be a multiple of 16 bytes and must fit in the mailbox. */
286 if ((size & 15) || size > MBOX_LEN)
/* VFs reach the mailbox via fixed addresses rather than per-PF registers. */
289 if (adap->flags & IS_VF) {
291 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
293 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
294 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
298 * If we have a negative timeout, that implies that we can't sleep.
306 * Attempt to gain access to the mailbox.
308 for (i = 0; i < 4; i++) {
309 ctl = t4_read_reg(adap, ctl_reg);
311 if (v != X_MBOWNER_NONE)
316 * If we were unable to gain access, dequeue ourselves from the
317 * mailbox atomic access list and report the error to our caller.
319 if (v != X_MBOWNER_PL) {
320 t4_report_fw_error(adap);
/* FW owning the mailbox means busy; anything else means we timed out. */
321 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
326 * If we gain ownership of the mailbox and there's a "valid" message
327 * in it, this is likely an asynchronous error message from the
328 * firmware. So we'll report that and then proceed on with attempting
329 * to issue our own command ... which may well fail if the error
330 * presaged the firmware crashing ...
332 if (ctl & F_MBMSGVALID) {
333 CH_ERR(adap, "found VALID command in mbox %u: "
334 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
335 (unsigned long long)t4_read_reg64(adap, data_reg),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
338 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
339 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
340 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
341 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
342 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
346 * Copy in the new mailbox command and send it on its way ...
348 for (i = 0; i < size; i += 8, p++)
349 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
351 if (adap->flags & IS_VF) {
353 * For the VFs, the Mailbox Data "registers" are
354 * actually backed by T4's "MA" interface rather than
355 * PL Registers (as is the case for the PFs). Because
356 * these are in different coherency domains, the write
357 * to the VF's PL-register-backed Mailbox Control can
358 * race in front of the writes to the MA-backed VF
359 * Mailbox Data "registers". So we need to do a
360 * read-back on at least one byte of the VF Mailbox
361 * Data registers before doing the write to the VF
362 * Mailbox Control register.
364 t4_read_reg(adap, data_reg);
367 CH_DUMP_MBOX(adap, mbox, data_reg);
/* Hand the mailbox (and the command in it) over to the firmware. */
369 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
370 t4_read_reg(adap, ctl_reg); /* flush write */
376 * Loop waiting for the reply; bail out if we time out or the firmware
380 for (i = 0; i < timeout; i += ms) {
/* PFs can watch A_PCIE_FW for a firmware-reported error while waiting. */
381 if (!(adap->flags & IS_VF)) {
382 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
383 if (pcie_fw & F_PCIE_FW_ERR)
387 ms = delay[delay_idx]; /* last element may repeat */
388 if (delay_idx < ARRAY_SIZE(delay) - 1)
395 v = t4_read_reg(adap, ctl_reg);
/* X_CIM_PF_NOACCESS means the mailbox registers aren't readable at all. */
396 if (v == X_CIM_PF_NOACCESS)
398 if (G_MBOWNER(v) == X_MBOWNER_PL) {
399 if (!(v & F_MBMSGVALID)) {
400 t4_write_reg(adap, ctl_reg,
401 V_MBOWNER(X_MBOWNER_NONE));
406 * Retrieve the command reply and release the mailbox.
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
409 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
411 CH_DUMP_MBOX(adap, mbox, data_reg);
/* The first reply flit carries the FW opcode and return value. */
413 res = be64_to_cpu(cmd_rpl[0]);
414 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
415 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
416 res = V_FW_CMD_RETVAL(EIO);
418 memcpy(rpl, cmd_rpl, size);
419 return -G_FW_CMD_RETVAL((int)res);
424 * We timed out waiting for a reply to our mailbox command. Report
425 * the error and also check to see if the firmware reported any
428 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
429 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
430 *(const u8 *)cmd, mbox);
432 /* If DUMP_MBOX is set the mbox has already been dumped */
433 if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
435 CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
436 "%016llx %016llx %016llx %016llx\n",
437 (unsigned long long)be64_to_cpu(p[0]),
438 (unsigned long long)be64_to_cpu(p[1]),
439 (unsigned long long)be64_to_cpu(p[2]),
440 (unsigned long long)be64_to_cpu(p[3]),
441 (unsigned long long)be64_to_cpu(p[4]),
442 (unsigned long long)be64_to_cpu(p[5]),
443 (unsigned long long)be64_to_cpu(p[6]),
444 (unsigned long long)be64_to_cpu(p[7]));
447 t4_report_fw_error(adap);
/* Convenience wrapper that applies the default firmware command timeout. */
452 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
453 void *rpl, bool sleep_ok)
455 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
456 sleep_ok, FW_CMD_MAX_TIMEOUT);
/*
 * Log the EDC ECC error address and BIST status registers for EDC @idx.
 * Only EDC 0/1 on T5 and later chips are handled.
 */
460 static int t4_edc_err_read(struct adapter *adap, int idx)
462 u32 edc_ecc_err_addr_reg;
463 u32 edc_bist_status_rdata_reg;
/* T4 lacks these registers entirely. */
466 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
469 if (idx != 0 && idx != 1) {
470 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
474 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
475 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
478 "edc%d err addr 0x%x: 0x%x.\n",
479 idx, edc_ecc_err_addr_reg,
480 t4_read_reg(adap, edc_ecc_err_addr_reg));
482 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
483 edc_bist_status_rdata_reg,
484 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
485 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
486 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
487 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
488 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
489 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
490 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
491 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
492 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
498 * t4_mc_read - read from MC through backdoor accesses
500 * @idx: which MC to access
501 * @addr: address of first byte requested
502 * @data: 64 bytes of data containing the requested address
503 * @ecc: where to store the corresponding 64-bit ECC word
505 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
506 * that covers the requested address @addr. If @ecc is not %NULL it
507 * is assigned the 64-bit ECC word for the read data.
509 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
512 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
513 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
/* T4 has a single MC register block; later chips index per-MC via MC_REG(). */
516 mc_bist_cmd_reg = A_MC_BIST_CMD;
517 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
518 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
519 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
520 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
522 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
523 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
524 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
525 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
527 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
/* Bail if a BIST operation is already in flight. */
531 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
/* Round the address down to a 64-byte line and request one full line. */
533 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
534 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
535 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
536 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
537 F_START_BIST | V_BIST_CMD_GAP(1));
538 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
542 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
/* Status words are read high-to-low and byte-swapped into the BE buffer --
 * presumably to match the memory's byte order; confirm against hardware docs. */
544 for (i = 15; i >= 0; i--)
545 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
547 *ecc = t4_read_reg64(adap, MC_DATA(16));
553 * t4_edc_read - read from EDC through backdoor accesses
555 * @idx: which EDC to access
556 * @addr: address of first byte requested
557 * @data: 64 bytes of data containing the requested address
558 * @ecc: where to store the corresponding 64-bit ECC word
560 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
561 * that covers the requested address @addr. If @ecc is not %NULL it
562 * is assigned the 64-bit ECC word for the read data.
564 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
567 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
568 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
/* Older chips use the EDC_REG() layout; T5+ uses the strided layout below. */
571 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
572 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
573 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
574 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
576 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
580 * These macro are missing in t4_regs.h file.
581 * Added temporarily for testing.
583 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
584 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
585 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
586 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
587 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
588 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
590 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
/* Bail if a BIST operation is already in flight. */
596 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
/* Round the address down to a 64-byte line and request one full line. */
598 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
599 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
600 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
601 t4_write_reg(adap, edc_bist_cmd_reg,
602 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
603 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
607 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
/* Status words are read high-to-low and byte-swapped into the BE buffer --
 * presumably to match the memory's byte order; confirm against hardware docs. */
609 for (i = 15; i >= 0; i--)
610 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
612 *ecc = t4_read_reg64(adap, EDC_DATA(16));
618 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
620 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
621 * @addr: address within indicated memory type
622 * @len: amount of memory to read
623 * @buf: host memory buffer
625 * Reads an [almost] arbitrary memory region in the firmware: the
626 * firmware memory address, length and host buffer must be aligned on
627 * 32-bit boundaries. The memory is returned as a raw byte sequence from
628 * the firmware's memory. If this memory contains data structures which
629 * contain multi-byte integers, it's the caller's responsibility to
630 * perform appropriate byte order conversions.
632 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
635 u32 pos, start, end, offset;
639 * Argument sanity checks ...
641 if ((addr & 0x3) || (len & 0x3))
645 * The underlying EDC/MC read routines read 64 bytes at a time so we
646 * need to round down the start and round up the end. We'll start
647 * copying out of the first line at (addr - start) a word at a time.
649 start = addr & ~(64-1);
650 end = (addr + len + 64-1) & ~(64-1);
651 offset = (addr - start)/sizeof(__be32);
/* After the first (possibly partial) line, resume copying at word 0. */
653 for (pos = start; pos < end; pos += 64, offset = 0) {
657 * Read the chip's memory block and bail if there's an error.
/* MC/MC1 map to t4_mc_read() indices 0/1; everything else is an EDC. */
659 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
660 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
662 ret = t4_edc_read(adap, mtype, pos, data, NULL);
667 * Copy the data into the caller's memory buffer.
669 while (offset < 16 && len > 0) {
670 *buf++ = data[offset++];
671 len -= sizeof(__be32);
679 * Return the specified PCI-E Configuration Space register from our Physical
680 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
681 * since we prefer to let the firmware own all of these registers, but if that
682 * fails we go for it directly ourselves.
684 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
688 * If fw_attach != 0, construct and send the Firmware LDST Command to
689 * retrieve the specified PCI-E Configuration Space register.
691 if (drv_fw_attach != 0) {
692 struct fw_ldst_cmd ldst_cmd;
/* Build a single-access PCIE LDST read targeting this PF's config space. */
695 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
696 ldst_cmd.op_to_addrspace =
697 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
700 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
701 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
702 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
703 ldst_cmd.u.pcie.ctrl_to_fn =
704 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
705 ldst_cmd.u.pcie.r = reg;
708 * If the LDST Command succeeds, return the result, otherwise
709 * fall through to reading it directly ourselves ...
711 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
714 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
716 CH_WARN(adap, "Firmware failed to return "
717 "Configuration Space register %d, err = %d\n",
722 * Read the desired Configuration Space register via the PCI-E
723 * Backdoor mechanism.
725 return t4_hw_pci_read_cfg4(adap, reg);
729 * t4_get_regs_len - return the size of the chips register set
730 * @adapter: the adapter
732 * Returns the size of the chip's BAR0 register space.
734 unsigned int t4_get_regs_len(struct adapter *adapter)
736 unsigned int chip_version = chip_id(adapter);
738 switch (chip_version) {
740 if (adapter->flags & IS_VF)
741 return FW_T4VF_REGMAP_SIZE;
742 return T4_REGMAP_SIZE;
746 if (adapter->flags & IS_VF)
/* NOTE(review): T5 VFs also report the T4VF regmap size here --
 * presumably the VF register maps are identical; confirm. */
747 return FW_T4VF_REGMAP_SIZE;
748 return T5_REGMAP_SIZE;
752 "Unsupported chip version %d\n", chip_version);
757 * t4_get_regs - read chip registers into provided buffer
759 * @buf: register buffer
760 * @buf_size: size (in bytes) of register buffer
762 * If the provided register buffer isn't large enough for the chip's
763 * full register range, the register dump will be truncated to the
764 * register buffer's size.
766 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
/* Each table is a flat list of inclusive (first, last) register-address
 * pairs to dump; consumed two entries at a time by the loop below. */
768 static const unsigned int t4_reg_ranges[] = {
1226 static const unsigned int t4vf_reg_ranges[] = {
1227 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1228 VF_MPS_REG(A_MPS_VF_CTL),
1229 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1230 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1231 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1232 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1233 FW_T4VF_MBDATA_BASE_ADDR,
1234 FW_T4VF_MBDATA_BASE_ADDR +
1235 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1238 static const unsigned int t5_reg_ranges[] = {
2013 static const unsigned int t5vf_reg_ranges[] = {
2014 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2015 VF_MPS_REG(A_MPS_VF_CTL),
2016 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2017 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2018 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2019 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2020 FW_T4VF_MBDATA_BASE_ADDR,
2021 FW_T4VF_MBDATA_BASE_ADDR +
2022 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2025 static const unsigned int t6_reg_ranges[] = {
2602 static const unsigned int t6vf_reg_ranges[] = {
2603 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2604 VF_MPS_REG(A_MPS_VF_CTL),
2605 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2606 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2607 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2608 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2609 FW_T6VF_MBDATA_BASE_ADDR,
2610 FW_T6VF_MBDATA_BASE_ADDR +
2611 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2614 u32 *buf_end = (u32 *)(buf + buf_size);
2615 const unsigned int *reg_ranges;
2616 int reg_ranges_size, range;
2617 unsigned int chip_version = chip_id(adap);
2620 * Select the right set of register ranges to dump depending on the
2621 * adapter chip type.
2623 switch (chip_version) {
2625 if (adap->flags & IS_VF) {
2626 reg_ranges = t4vf_reg_ranges;
2627 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2629 reg_ranges = t4_reg_ranges;
2630 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2635 if (adap->flags & IS_VF) {
2636 reg_ranges = t5vf_reg_ranges;
2637 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2639 reg_ranges = t5_reg_ranges;
2640 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2645 if (adap->flags & IS_VF) {
2646 reg_ranges = t6vf_reg_ranges;
2647 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2649 reg_ranges = t6_reg_ranges;
2650 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2656 "Unsupported chip version %d\n", chip_version);
2661 * Clear the register buffer and insert the appropriate register
2662 * values selected by the above register ranges.
2664 memset(buf, 0, buf_size);
2665 for (range = 0; range < reg_ranges_size; range += 2) {
2666 unsigned int reg = reg_ranges[range];
2667 unsigned int last_reg = reg_ranges[range + 1];
/* Each register's value lands in the buffer at its own BAR0 offset. */
2668 u32 *bufp = (u32 *)(buf + reg);
2671 * Iterate across the register range filling in the register
2672 * buffer but don't write past the end of the register buffer.
2674 while (reg <= last_reg && bufp < buf_end) {
2675 *bufp++ = t4_read_reg(adap, reg);
2682 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2694 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2696 #define EEPROM_DELAY 10 /* 10us per poll spin */
2697 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2699 #define EEPROM_STAT_ADDR 0x7bfc /* status/write-protect word (see t4_seeprom_wp) */
2700 #define VPD_BASE 0x400 /* VPD start on current cards */
2701 #define VPD_BASE_OLD 0 /* VPD start on early cards */
2702 #define VPD_LEN 1024 /* bytes of VPD buffered/parsed */
2703 #define VPD_INFO_FLD_HDR_SIZE 3 /* 2-byte keyword + 1-byte length */
2704 #define CHELSIO_VPD_UNIQUE_ID 0x82 /* first byte of a valid Chelsio VPD */
2707 * Small utility function to wait till any outstanding VPD Access is complete.
2708 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2709 * VPD Access in flight. This allows us to handle the problem of having a
2710 * previous VPD Access time out and prevent an attempt to inject a new VPD
2711 * Request before any in-flight VPD request has completed.
2713 static int t4_seeprom_wait(struct adapter *adapter)
2715 unsigned int base = adapter->params.pci.vpd_cap_addr;
2719 * If no VPD Access is in flight, we can just return success right
2722 if (!adapter->vpd_busy)
2726 * Poll the VPD Capability Address/Flag register waiting for it
2727 * to indicate that the operation is complete.
2729 max_poll = EEPROM_MAX_POLL;
2733 udelay(EEPROM_DELAY);
2734 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2737 * If the operation is complete, mark the VPD as no longer
2738 * busy and return success.
/* vpd_flag holds the flag-bit value that signals completion: the bit is
 * set on a finished read and cleared on a finished write. */
2740 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2741 adapter->vpd_busy = 0;
2744 } while (--max_poll);
2747 * Failure! Note that we leave the VPD Busy status set in order to
2748 * avoid pushing a new VPD Access request into the VPD Capability till
2749 * the current operation eventually succeeds. It's a bug to issue a
2750 * new request when an existing request is in flight and will result
2751 * in corrupt hardware state.
2757 * t4_seeprom_read - read a serial EEPROM location
2758 * @adapter: adapter to read
2759 * @addr: EEPROM virtual address
2760 * @data: where to store the read data
2762 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2763 * VPD capability. Note that this function must be called with a virtual
2766 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2768 unsigned int base = adapter->params.pci.vpd_cap_addr;
2772 * VPD Accesses must always be 4-byte aligned!
2774 if (addr >= EEPROMVSIZE || (addr & 3))
2778 * Wait for any previous operation which may still be in flight to
2781 ret = t4_seeprom_wait(adapter);
2783 CH_ERR(adapter, "VPD still busy from previous operation\n");
2788 * Issue our new VPD Read request, mark the VPD as being busy and wait
2789 * for our request to complete. If it doesn't complete, note the
2790 * error and return it to our caller. Note that we do not reset the
/* Per the PCI VPD capability, writing the address with the flag bit clear
 * starts a read; hardware sets the flag when the data is ready. */
2793 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr)
2794 adapter->vpd_busy = 1;
2795 adapter->vpd_flag = PCI_VPD_ADDR_F;
2796 ret = t4_seeprom_wait(adapter);
2798 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2803 * Grab the returned data, swizzle it into our endianness and
2806 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2807 *data = le32_to_cpu(*data);
2812 * t4_seeprom_write - write a serial EEPROM location
2813 * @adapter: adapter to write
2814 * @addr: virtual EEPROM address
2815 * @data: value to write
2817 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2818 * VPD capability. Note that this function must be called with a virtual
2821 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2823 unsigned int base = adapter->params.pci.vpd_cap_addr;
2829 * VPD Accesses must always be 4-byte aligned!
2831 if (addr >= EEPROMVSIZE || (addr & 3))
2835 * Wait for any previous operation which may still be in flight to
2838 ret = t4_seeprom_wait(adapter);
2840 CH_ERR(adapter, "VPD still busy from previous operation\n");
2845 * Issue our new VPD Write request, mark the VPD as being busy and wait
2846 * for our request to complete. If it doesn't complete, note the
2847 * error and return it to our caller. Note that we do not reset the
2850 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
/* Per the PCI VPD capability, setting the flag bit with the address starts
 * the write; hardware clears the flag when the write completes. */
2852 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2853 (u16)addr | PCI_VPD_ADDR_F);
2854 adapter->vpd_busy = 1;
2855 adapter->vpd_flag = 0;
2856 ret = t4_seeprom_wait(adapter);
2858 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2863 * Reset PCI_VPD_DATA register after a transaction and wait for our
2864 * request to complete. If it doesn't complete, return error.
2866 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2867 max_poll = EEPROM_MAX_POLL;
2869 udelay(EEPROM_DELAY);
/* Poll the EEPROM status word until its busy bit (0x1) clears. */
2870 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2871 } while ((stats_reg & 0x1) && --max_poll);
2875 /* Return success! */
2880 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2881 * @phys_addr: the physical EEPROM address
2882 * @fn: the PCI function number
2883 * @sz: size of function-specific area
2885 * Translate a physical EEPROM address to virtual. The first 1K is
2886 * accessed through virtual addresses starting at 31K, the rest is
2887 * accessed through virtual addresses starting at 0.
2889 * The mapping is as follows:
2890 * [0..1K) -> [31K..32K)
2891 * [1K..1K+A) -> [ES-A..ES)
2892 * [1K+A..ES) -> [0..ES-A-1K)
2894 * where A = @fn * @sz, and ES = EEPROM size.
2896 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* NOTE(review): the checks below use fn as the scaled quantity A = fn * sz
 * from the mapping above; an elided "fn *= sz;" presumably precedes them --
 * confirm against the full source. */
2899 if (phys_addr < 1024)
2900 return phys_addr + (31 << 10);
2901 if (phys_addr < 1024 + fn)
2902 return EEPROMSIZE - fn + phys_addr - 1024;
2903 if (phys_addr < EEPROMSIZE)
2904 return phys_addr - 1024 - fn;
2909 * t4_seeprom_wp - enable/disable EEPROM write protection
2910 * @adapter: the adapter
2911 * @enable: whether to enable or disable write protection
2913 * Enables or disables write protection on the serial EEPROM.
2915 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* Writing 0xc to the status word presumably sets the EEPROM's block-protect
 * bits and 0 clears them -- confirm against the EEPROM part's datasheet. */
2917 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2921 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2922 * @v: Pointer to buffered vpd data structure
2923 * @kw: The keyword to search for
2925 * Returns the value of the information field keyword or
2926 * -ENOENT otherwise.
2928 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2931 unsigned int offset , len;
2932 const u8 *buf = (const u8 *)v;
2933 const u8 *vpdr_len = &v->vpdr_len[0];
/* Keyword fields start right after the fixed VPD header. */
2934 offset = sizeof(struct t4_vpd_hdr);
/* VPD-R length is stored little-endian in two bytes of the header. */
2935 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
/* A length that would run past the buffered VPD is corrupt — bail out. */
2937 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
/*
 * Walk the keyword fields.  Each field is a VPD_INFO_FLD_HDR_SIZE-byte
 * header (2-byte keyword + 1-byte length at buf[i+2]) followed by the
 * field data; the loop bound keeps each header fully inside the VPD-R.
 */
2941 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
/* Keywords are exactly two characters ("SN", "PN", ...). */
2942 if(memcmp(buf + i , kw , 2) == 0){
/* Match: advance past the field header to the field's data. */
2943 i += VPD_INFO_FLD_HDR_SIZE;
/* No match: skip this field's header and its data (length at buf[i+2]). */
2947 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2955 * get_vpd_params - read VPD parameters from VPD EEPROM
2956 * @adapter: adapter to read
2957 * @p: where to store the parameters
2958 * @vpd: caller provided temporary space to read the VPD into
2960 * Reads card parameters stored in VPD EEPROM.
2962 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2968 const struct t4_vpd_hdr *v;
2971 * Card information normally starts at VPD_BASE but early cards had
/* Probe VPD_BASE first so we can tell new-layout from old-layout cards. */
2974 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2979 * The VPD shall have a unique identifier specified by the PCI SIG.
2980 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2981 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2982 * is expected to automatically put this entry at the
2983 * beginning of the VPD.
2985 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Pull the whole VPD image into the caller-supplied buffer, one word at a time. */
2987 for (i = 0; i < VPD_LEN; i += 4) {
2988 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2992 v = (const struct t4_vpd_hdr *)vpd;
/*
 * Locate a keyword in the buffered VPD and store its offset in @var;
 * complains and (presumably) fails the call if the keyword is absent —
 * error path lines not visible here.
 */
2994 #define FIND_VPD_KW(var,name) do { \
2995 var = get_vpd_keyword_val(v , name); \
2997 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* "RV" is the checksum keyword; sum bytes up to it to validate the VPD. */
3002 FIND_VPD_KW(i, "RV");
/* NOTE(review): loop body not visible; presumably csum += vpd[i]. */
3003 for (csum = 0; i >= 0; i--)
3008 "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Locate the standard information fields we care about. */
3012 FIND_VPD_KW(ec, "EC");
3013 FIND_VPD_KW(sn, "SN");
3014 FIND_VPD_KW(pn, "PN");
3015 FIND_VPD_KW(na, "NA");
/* Copy each field into the vpd_params result, using the stored field
 * length (byte at offset -VPD_INFO_FLD_HDR_SIZE+2 from the data) capped
 * at the destination size, then strip trailing whitespace.
 */
3018 memcpy(p->id, v->id_data, ID_LEN);
3020 memcpy(p->ec, vpd + ec, EC_LEN);
3022 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3023 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3025 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3026 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3027 strstrip((char *)p->pn);
3028 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3029 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3030 strstrip((char *)p->na);
3035 /* serial flash and firmware constants and flash config file constants */
3037 SF_ATTEMPTS = 10, /* max retries for SF operations */
3039 /* flash command opcodes */
3040 SF_PROG_PAGE = 2, /* program page */
3041 SF_WR_DISABLE = 4, /* disable writes */
3042 SF_RD_STATUS = 5, /* read status register */
3043 SF_WR_ENABLE = 6, /* enable writes */
3044 SF_RD_DATA_FAST = 0xb, /* read flash */
3045 SF_RD_ID = 0x9f, /* read ID */
3046 SF_ERASE_SECTOR = 0xd8, /* erase sector */
3050 * sf1_read - read data from the serial flash
3051 * @adapter: the adapter
3052 * @byte_cnt: number of bytes to read
3053 * @cont: whether another operation will be chained
3054 * @lock: whether to lock SF for PL access only
3055 * @valp: where to store the read data
3057 * Reads up to 4 bytes of data from the serial flash. The location of
3058 * the read needs to be specified prior to calling this by issuing the
3059 * appropriate commands to the serial flash.
3061 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3062 int lock, u32 *valp)
/* A single SF operation moves 1-4 bytes; anything else is invalid. */
3066 if (!byte_cnt || byte_cnt > 4)
/* Another SF operation is still in flight — don't stomp on it. */
3068 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Kick off the read: byte count is encoded as count-1 in the op register. */
3070 t4_write_reg(adapter, A_SF_OP,
3071 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
/* Poll BUSY until the flash controller finishes, then fetch the data. */
3072 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3074 *valp = t4_read_reg(adapter, A_SF_DATA);
3079 * sf1_write - write data to the serial flash
3080 * @adapter: the adapter
3081 * @byte_cnt: number of bytes to write
3082 * @cont: whether another operation will be chained
3083 * @lock: whether to lock SF for PL access only
3084 * @val: value to write
3086 * Writes up to 4 bytes of data to the serial flash. The location of
3087 * the write needs to be specified prior to calling this by issuing the
3088 * appropriate commands to the serial flash.
3090 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* A single SF operation moves 1-4 bytes; anything else is invalid. */
3093 if (!byte_cnt || byte_cnt > 4)
/* Another SF operation is still in flight — don't stomp on it. */
3095 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data, then start the write (V_OP(1) selects write direction). */
3097 t4_write_reg(adapter, A_SF_DATA, val);
3098 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3099 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
/* Poll BUSY until the flash controller completes the operation. */
3100 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3104 * flash_wait_op - wait for a flash operation to complete
3105 * @adapter: the adapter
3106 * @attempts: max number of polls of the status register
3107 * @delay: delay between polls in ms
3109 * Wait for a flash operation to complete by polling the status register.
3111 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RD_STATUS then read back one status byte; fail on any SF error. */
3117 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3118 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
/* Give up once the poll budget is exhausted (timeout path). */
3122 if (--attempts == 0)
3130 * t4_read_flash - read words from serial flash
3131 * @adapter: the adapter
3132 * @addr: the start address for the read
3133 * @nwords: how many 32-bit words to read
3134 * @data: where to store the read data
3135 * @byte_oriented: whether to store data as bytes or as words
3137 * Read the specified number of 32-bit words from the serial flash.
3138 * If @byte_oriented is set the read data is stored as a byte array
3139 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3140 * natural endianness.
3142 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3143 unsigned int nwords, u32 *data, int byte_oriented)
/* Reject reads past the end of the flash part or unaligned addresses. */
3147 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/*
 * Build the 4-byte command: the 24-bit address goes out big-endian
 * (hence swab32) with the FAST_READ opcode in the low byte.
 */
3150 addr = swab32(addr) | SF_RD_DATA_FAST;
/* Send command+address, then one dummy read required by FAST_READ. */
3152 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3153 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Stream the words; keep CONT set until the last word, then lock out. */
3156 for ( ; nwords; nwords--, data++) {
3157 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3159 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* byte_oriented: present the word as a big-endian byte stream. */
3163 *data = (__force __u32)(cpu_to_be32(*data));
3169 * t4_write_flash - write up to a page of data to the serial flash
3170 * @adapter: the adapter
3171 * @addr: the start address to write
3172 * @n: length of data to write in bytes
3173 * @data: the data to write
3174 * @byte_oriented: whether to store data as bytes or as words
3176 * Writes up to a page of data (256 bytes) to the serial flash starting
3177 * at the given address. All the data must be written to the same page.
3178 * If @byte_oriented is set the write data is stored as byte stream
3179 * (i.e. matches what on disk), otherwise in big-endian.
3181 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3182 unsigned int n, const u8 *data, int byte_oriented)
3185 u32 buf[SF_PAGE_SIZE / 4];
/* offset within the 256-byte page; the write must not cross a page. */
3186 unsigned int i, c, left, val, offset = addr & 0xff;
3188 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* 24-bit address big-endian (swab32) with PROG_PAGE opcode in low byte. */
3191 val = swab32(addr) | SF_PROG_PAGE;
/* Enable writes, then send the program-page command + address. */
3193 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3194 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the data up to 4 bytes per SF op, big-endian within each word. */
3197 for (left = n; left; left -= c) {
3199 for (val = 0, i = 0; i < c; ++i)
3200 val = (val << 8) + *data++;
3203 val = cpu_to_be32(val);
3205 ret = sf1_write(adapter, c, c != left, 1, val);
/* Wait for the page-program cycle to finish in the flash part. */
3209 ret = flash_wait_op(adapter, 8, 1);
3213 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3215 /* Read the page to verify the write succeeded */
3216 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data was advanced past the payload above, so data - n is its start. */
3221 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3223 "failed to correctly write the flash page at %#x\n",
3230 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3235 * t4_get_fw_version - read the firmware version
3236 * @adapter: the adapter
3237 * @vers: where to place the version
3239 * Reads the FW version from flash.
3241 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3243 return t4_read_flash(adapter, FLASH_FW_START +
3244 offsetof(struct fw_hdr, fw_ver), 1,
3249 * t4_get_bs_version - read the firmware bootstrap version
3250 * @adapter: the adapter
3251 * @vers: where to place the version
3253 * Reads the FW Bootstrap version from flash.
3255 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3257 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3258 offsetof(struct fw_hdr, fw_ver), 1,
3263 * t4_get_tp_version - read the TP microcode version
3264 * @adapter: the adapter
3265 * @vers: where to place the version
3267 * Reads the TP microcode version from flash.
3269 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3271 return t4_read_flash(adapter, FLASH_FW_START +
3272 offsetof(struct fw_hdr, tp_microcode_ver),
3277 * t4_get_exprom_version - return the Expansion ROM version (if any)
3278 * @adapter: the adapter
3279 * @vers: where to place the version
3281 * Reads the Expansion ROM header from FLASH and returns the version
3282 * number (if present) through the @vers return value pointer. We return
3283 * this in the Firmware Version Format since it's convenient. Return
3284 * 0 on success, -ENOENT if no Expansion ROM is present.
3286 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
/* Minimal view of the expansion ROM header: just signature + version. */
3288 struct exprom_header {
3289 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3290 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized buffer so t4_read_flash can fill it 32 bits at a time. */
3292 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3296 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3297 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3302 hdr = (struct exprom_header *)exprom_header_buf;
/* 0x55 0xaa is the PCI expansion ROM signature; absent => no ROM. */
3303 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* Repackage the 4 version bytes into Firmware Version Format. */
3306 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3307 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3308 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3309 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3314 * t4_get_scfg_version - return the Serial Configuration version
3315 * @adapter: the adapter
3316 * @vers: where to place the version
3318 * Reads the Serial Configuration Version via the Firmware interface
3319 * (thus this can only be called once we're ready to issue Firmware
3320 * commands). The format of the Serial Configuration version is
3321 * adapter specific. Returns 0 on success, an error on failure.
3323 * Note that early versions of the Firmware didn't include the ability
3324 * to retrieve the Serial Configuration version, so we zero-out the
3325 * return-value parameter in that case to avoid leaving it with
3328 * Also note that the Firmware will return its cached copy of the Serial
3329 * Initialization Revision ID, not the actual Revision ID as written in
3330 * the Serial EEPROM. This is only an issue if a new VPD has been written
3331 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3332 * it's best to defer calling this routine till after a FW_RESET_CMD has
3333 * been issued if the Host Driver will be performing a full adapter
3336 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
/* Build the DEV/SCFGREV parameter selector for the firmware query. */
3341 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3342 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
/* Ask the firmware (via our mailbox/PF) for the one parameter. */
3343 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3344 1, &scfgrev_param, vers);
3351 * t4_get_vpd_version - return the VPD version
3352 * @adapter: the adapter
3353 * @vers: where to place the version
3355 * Reads the VPD via the Firmware interface (thus this can only be called
3356 * once we're ready to issue Firmware commands). The format of the
3357 * VPD version is adapter specific. Returns 0 on success, an error on
3360 * Note that early versions of the Firmware didn't include the ability
3361 * to retrieve the VPD version, so we zero-out the return-value parameter
3362 * in that case to avoid leaving it with garbage in it.
3364 * Also note that the Firmware will return its cached copy of the VPD
3365 * Revision ID, not the actual Revision ID as written in the Serial
3366 * EEPROM. This is only an issue if a new VPD has been written and the
3367 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3368 * to defer calling this routine till after a FW_RESET_CMD has been issued
3369 * if the Host Driver will be performing a full adapter initialization.
3371 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
/* Build the DEV/VPDREV parameter selector for the firmware query. */
3376 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3377 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
/* Ask the firmware (via our mailbox/PF) for the one parameter. */
3378 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3379 1, &vpdrev_param, vers);
3386 * t4_get_version_info - extract various chip/firmware version information
3387 * @adapter: the adapter
3389 * Reads various chip/firmware version numbers and stores them into the
3390 * adapter Adapter Parameters structure. If any of the efforts fails
3391 * the first failure will be returned, but all of the version numbers
3394 int t4_get_version_info(struct adapter *adapter)
/*
 * Run a version-fetch call and latch only the FIRST failure into ret,
 * so all getters still execute and every available version is stored.
 */
3398 #define FIRST_RET(__getvinfo) \
3400 int __ret = __getvinfo; \
3401 if (__ret && !ret) \
3405 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3406 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3407 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3408 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3409 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3410 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3418 * t4_flash_erase_sectors - erase a range of flash sectors
3419 * @adapter: the adapter
3420 * @start: the first sector to erase
3421 * @end: the last sector to erase
3423 * Erases the sectors in the given inclusive range.
3425 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
/* The inclusive range must fit within the part's sector count. */
3429 if (end >= adapter->params.sf_nsec)
3432 while (start <= end) {
/*
 * Per sector: write-enable, issue ERASE_SECTOR with the sector number
 * in the address field (start << 8), then wait for the erase cycle
 * (erases are slow — up to 14 polls at 500 ms each).
 */
3433 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3434 (ret = sf1_write(adapter, 4, 0, 1,
3435 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3436 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3438 "erase of flash sector %d failed, error %d\n",
3444 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3449 * t4_flash_cfg_addr - return the address of the flash configuration file
3450 * @adapter: the adapter
3452 * Return the address within the flash where the Firmware Configuration
3453 * File is stored, or an error if the device FLASH is too small to contain
3454 * a Firmware Configuration File.
3456 int t4_flash_cfg_addr(struct adapter *adapter)
3459 * If the device FLASH isn't large enough to hold a Firmware
3460 * Configuration File, return an error.
3462 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3465 return FLASH_CFG_START;
3469 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3470 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3471 * and emit an error message for mismatched firmware to save our caller the
3474 static int t4_fw_matches_chip(struct adapter *adap,
3475 const struct fw_hdr *hdr)
3478 * The expression below will return FALSE for any unsupported adapter
3479 * which will keep us "honest" in the future ...
3481 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3482 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3483 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3487 "FW image (%d) is not suitable for this adapter (%d)\n",
3488 hdr->chip, chip_id(adap));
3493 * t4_load_fw - download firmware
3494 * @adap: the adapter
3495 * @fw_data: the firmware image to write
3498 * Write the supplied firmware image to the card's serial flash.
3500 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3505 u8 first_page[SF_PAGE_SIZE];
3506 const u32 *p = (const u32 *)fw_data;
3507 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3508 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3509 unsigned int fw_start_sec;
3510 unsigned int fw_start;
3511 unsigned int fw_size;
/* Bootstrap images go to their own flash region; regular FW elsewhere. */
3513 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3514 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3515 fw_start = FLASH_FWBOOTSTRAP_START;
3516 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3518 fw_start_sec = FLASH_FW_START_SEC;
3519 fw_start = FLASH_FW_START;
3520 fw_size = FLASH_FW_MAX_SIZE;
/* Sanity-check the image: non-empty, 512-aligned, header-consistent,
 * not larger than the target region, and built for this chip.
 */
3524 CH_ERR(adap, "FW image has no data\n");
3529 "FW image size not multiple of 512 bytes\n");
3532 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3534 "FW image size differs from size in FW header\n");
3537 if (size > fw_size) {
3538 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3542 if (!t4_fw_matches_chip(adap, hdr))
/* The 32-bit one's-complement style checksum over the image must be
 * 0xffffffff for a valid image.
 */
3545 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3546 csum += be32_to_cpu(p[i]);
3548 if (csum != 0xffffffff) {
3550 "corrupted firmware image, checksum %#x\n", csum);
/* Erase every sector the new image will span before programming. */
3554 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3555 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3560 * We write the correct version at the end so the driver can see a bad
3561 * version if the FW write fails. Start by writing a copy of the
3562 * first page with a bad version.
3564 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3565 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3566 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
/* Program the remaining pages one SF page (256 bytes) at a time. */
3571 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3572 addr += SF_PAGE_SIZE;
3573 fw_data += SF_PAGE_SIZE;
3574 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* Finally patch the real version into the header to mark success. */
3579 ret = t4_write_flash(adap,
3580 fw_start + offsetof(struct fw_hdr, fw_ver),
3581 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3584 CH_ERR(adap, "firmware download failed, error %d\n",
3590 * t4_fwcache - firmware cache operation
3591 * @adap: the adapter
3592 * @op : the operation (flush or flush and invalidate)
3594 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3596 struct fw_params_cmd c;
3598 memset(&c, 0, sizeof(c));
/* FW_PARAMS write command addressed to our PF, VF 0. */
3600 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3601 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3602 V_FW_PARAMS_CMD_PFN(adap->pf) |
3603 V_FW_PARAMS_CMD_VFN(0));
3604 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Single parameter: DEV/FWCACHE with the requested operation as value. */
3606 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3607 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3608 c.param[0].val = (__force __be32)op;
/* Synchronous mailbox exchange; no reply payload is needed. */
3610 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/*
 * Dump the CIM PIF logic-analyzer request and response buffers into
 * @pif_req/@pif_rsp and report the current write pointers.
 */
3613 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3614 unsigned int *pif_req_wrptr,
3615 unsigned int *pif_rsp_wrptr)
3618 u32 cfg, val, req, rsp;
/* Temporarily disable LA capture (clear LADBGEN) while we read it out. */
3620 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3621 if (cfg & F_LADBGEN)
3622 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3624 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3625 req = G_POLADBGWRPTR(val);
3626 rsp = G_PILADBGWRPTR(val);
3628 *pif_req_wrptr = req;
3630 *pif_rsp_wrptr = rsp;
/* Each LA entry is spread across 6 debug-data reads per read pointer. */
3632 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3633 for (j = 0; j < 6; j++) {
3634 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3635 V_PILADBGRDPTR(rsp));
3636 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3637 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
/* Read pointers advance by 2 and wrap at the LA depth mask. */
3641 req = (req + 2) & M_POLADBGRDPTR;
3642 rsp = (rsp + 2) & M_PILADBGRDPTR;
/* Restore the original debug configuration (re-enables LA if it was on). */
3644 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Dump the CIM MA logic-analyzer request and response buffers into
 * @ma_req/@ma_rsp.
 */
3647 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
/* Temporarily disable LA capture (clear LADBGEN) while we read it out. */
3652 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3653 if (cfg & F_LADBGEN)
3654 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
/* Each LA entry is spread across 5 debug-data reads per read pointer. */
3656 for (i = 0; i < CIM_MALA_SIZE; i++) {
3657 for (j = 0; j < 5; j++) {
3659 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3660 V_PILADBGRDPTR(idx));
3661 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3662 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
/* Restore the original debug configuration (re-enables LA if it was on). */
3665 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Read the 8 interleaved ULP_RX logic-analyzer streams into @la_buf.
 * Stream i's samples land at la_buf[i], la_buf[i+8], la_buf[i+16], ...
 */
3668 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3672 for (i = 0; i < 8; i++) {
3673 u32 *p = la_buf + i;
/* Select stream i, then rewind its read pointer to the write pointer. */
3675 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3676 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3677 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
/* Each RDDATA read auto-advances; stride 8 interleaves the streams. */
3678 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3679 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3683 #define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
3687 * t4_link_l1cfg - apply link configuration to MAC/PHY
3688 * @phy: the PHY to setup
3689 * @mac: the MAC to setup
3690 * @lc: the requested link configuration
3692 * Set up a port's MAC and PHY according to a desired link configuration.
3693 * - If the PHY can auto-negotiate first decide what to advertise, then
3694 * enable/disable auto-negotiation as desired, and reset.
3695 * - If the PHY does not auto-negotiate just reset it.
3696 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3697 * otherwise do it later based on the outcome of auto-negotiation.
3699 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3700 struct link_config *lc)
3702 struct fw_port_cmd c;
3703 unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3704 unsigned int fc, fec;
/* Translate the requested pause settings into FW port capabilities. */
3707 if (lc->requested_fc & PAUSE_RX)
3708 fc |= FW_PORT_CAP_FC_RX;
3709 if (lc->requested_fc & PAUSE_TX)
3710 fc |= FW_PORT_CAP_FC_TX;
/* Translate the requested FEC settings into FW port capabilities. */
3713 if (lc->requested_fec & FEC_RS)
3714 fec |= FW_PORT_CAP_FEC_RS;
3715 if (lc->requested_fec & FEC_BASER_RS)
3716 fec |= FW_PORT_CAP_FEC_BASER_RS;
3717 if (lc->requested_fec & FEC_RESERVED)
3718 fec |= FW_PORT_CAP_FEC_RESERVED;
/* Build the L1_CFG port command for this port. */
3720 memset(&c, 0, sizeof(c));
3721 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3722 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3723 V_FW_PORT_CMD_PORTID(port));
3725 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* No autoneg capability: advertise the fixed abilities and record the
 * resolved fc/fec now; otherwise honor an explicit AUTONEG_DISABLE with
 * the requested speed, else advertise for negotiation.
 */
3728 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3729 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3731 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
3732 lc->fec = lc->requested_fec;
3733 } else if (lc->autoneg == AUTONEG_DISABLE) {
3734 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed |
3736 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
3737 lc->fec = lc->requested_fec;
3739 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
3741 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3745 * t4_restart_aneg - restart autonegotiation
3746 * @adap: the adapter
3747 * @mbox: mbox to use for the FW command
3748 * @port: the port id
3750 * Restarts autonegotiation for the selected port.
3752 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3754 struct fw_port_cmd c;
/* L1_CFG command advertising only ANEG restarts autonegotiation. */
3756 memset(&c, 0, sizeof(c));
3757 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3758 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3759 V_FW_PORT_CMD_PORTID(port));
3761 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3763 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3764 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3767 typedef void (*int_handler_t)(struct adapter *adap);
3770 unsigned int mask; /* bits to check in interrupt status */
3771 const char *msg; /* message to print or NULL */
3772 short stat_idx; /* stat counter to increment or -1 */
3773 unsigned short fatal; /* whether the condition reported is fatal */
3774 int_handler_t int_handler; /* platform-specific int handler */
3778 * t4_handle_intr_status - table driven interrupt handler
3779 * @adapter: the adapter that generated the interrupt
3780 * @reg: the interrupt status register to process
3781 * @acts: table of interrupt actions
3783 * A table driven interrupt handler that applies a set of masks to an
3784 * interrupt status word and performs the corresponding actions if the
3785 * interrupts described by the mask have occurred. The actions include
3786 * optionally emitting a warning or alert message. The table is terminated
3787 * by an entry specifying mask 0. Returns the number of fatal interrupt
3790 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3791 const struct intr_info *acts)
3794 unsigned int mask = 0;
3795 unsigned int status = t4_read_reg(adapter, reg);
/* Table is terminated by a zero mask; process each matching entry. */
3797 for ( ; acts->mask; ++acts) {
3798 if (!(status & acts->mask))
/* Fatal conditions get an alert; non-fatal ones a rate-limited warning. */
3802 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3803 status & acts->mask);
3804 } else if (acts->msg)
3805 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3806 status & acts->mask);
/* Optional per-entry platform handler runs for every matched entry. */
3807 if (acts->int_handler)
3808 acts->int_handler(adapter);
3812 if (status) /* clear processed interrupts */
3813 t4_write_reg(adapter, reg, status);
3818 * Interrupt handler for the PCIE module.
3820 static void pcie_intr_handler(struct adapter *adapter)
/* T4-only: UTL system-bus-agent status conditions (all fatal). */
3822 static const struct intr_info sysbus_intr_info[] = {
3823 { F_RNPP, "RXNP array parity error", -1, 1 },
3824 { F_RPCP, "RXPC array parity error", -1, 1 },
3825 { F_RCIP, "RXCIF array parity error", -1, 1 },
3826 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3827 { F_RFTP, "RXFT array parity error", -1, 1 },
/* T4-only: UTL PCI-Express-port status conditions (all fatal). */
3830 static const struct intr_info pcie_port_intr_info[] = {
3831 { F_TPCP, "TXPC array parity error", -1, 1 },
3832 { F_TNPP, "TXNP array parity error", -1, 1 },
3833 { F_TFTP, "TXFT array parity error", -1, 1 },
3834 { F_TCAP, "TXCA array parity error", -1, 1 },
3835 { F_TCIP, "TXCIF array parity error", -1, 1 },
3836 { F_RCAP, "RXCA array parity error", -1, 1 },
3837 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3838 { F_RDPE, "Rx data parity error", -1, 1 },
3839 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
/* T4 PCIE_INT_CAUSE conditions. */
3842 static const struct intr_info pcie_intr_info[] = {
3843 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3844 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3845 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3846 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3847 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3848 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3849 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3850 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3851 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3852 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3853 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3854 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3855 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3856 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3857 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3858 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3859 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3860 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3861 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3862 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3863 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3864 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3865 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3866 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3867 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3868 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3869 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3870 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3871 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3872 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
/* T5+ PCIE_INT_CAUSE conditions (different bit layout than T4). */
3877 static const struct intr_info t5_pcie_intr_info[] = {
3878 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3880 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3881 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3882 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3883 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3884 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3885 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3886 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3888 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3890 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3891 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3892 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3893 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3894 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3896 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3897 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3898 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3899 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3900 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3901 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3902 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3903 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3904 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3905 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3906 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3908 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3910 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3911 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3912 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3913 { F_READRSPERR, "Outbound read error", -1,
/* T4 also has the two UTL status registers; T5+ only PCIE_INT_CAUSE.
 * Accumulate the count of fatal conditions across all sources.
 */
3921 fat = t4_handle_intr_status(adapter,
3922 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3924 t4_handle_intr_status(adapter,
3925 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3926 pcie_port_intr_info) +
3927 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3930 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
/* Any fatal condition takes the adapter down. */
3933 t4_fatal_err(adapter);
3937 * TP interrupt handler.
3939 static void tp_intr_handler(struct adapter *adapter)
/* All TP conditions we act on are fatal. */
3941 static const struct intr_info tp_intr_info[] = {
3942 { 0x3fffffff, "TP parity error", -1, 1 },
3943 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
/* Non-zero return means at least one fatal condition fired. */
3947 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3948 t4_fatal_err(adapter);
3952 * SGE interrupt handler.
3954 static void sge_intr_handler(struct adapter *adapter)
/* Conditions common to all chip generations (SGE_INT_CAUSE3). */
3959 static const struct intr_info sge_intr_info[] = {
3960 { F_ERR_CPL_EXCEED_IQE_SIZE,
3961 "SGE received CPL exceeding IQE size", -1, 1 },
3962 { F_ERR_INVALID_CIDX_INC,
3963 "SGE GTS CIDX increment too large", -1, 0 },
3964 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3965 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3966 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3967 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3968 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3970 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3972 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3974 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3976 { F_ERR_ING_CTXT_PRIO,
3977 "SGE too many priority ingress contexts", -1, 0 },
3978 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3979 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* Extra conditions that exist only on T4/T5. */
3983 static const struct intr_info t4t5_sge_intr_info[] = {
3984 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3985 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3986 { F_ERR_EGR_CTXT_PRIO,
3987 "SGE too many priority egress contexts", -1, 0 },
3992 * For now, treat below interrupts as fatal so that we disable SGE and
3993 * get better debug */
3994 static const struct intr_info t6_sge_intr_info[] = {
3995 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
3996 "SGE PCIe error for a DBP thread", -1, 1 },
3998 "SGE Actual WRE packet is less than advertized length",
/* CAUSE1/CAUSE2 carry raw parity error bits; log and clear them. */
4003 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4004 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4006 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4007 (unsigned long long)v);
4008 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4009 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
/* CAUSE3 is table-driven; pick the chip-specific extra table. */
4012 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4013 if (chip_id(adapter) <= CHELSIO_T5)
4014 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4015 t4t5_sge_intr_info);
4017 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
/* Report and clear any latched per-queue error indication. */
4020 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4021 if (err & F_ERROR_QID_VALID) {
4022 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4023 if (err & F_UNCAPTURED_ERROR)
4024 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4025 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4026 F_UNCAPTURED_ERROR);
/* Any fatal/parity condition above takes the adapter down. */
4030 t4_fatal_err(adapter);
4033 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4034 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4035 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4036 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4039 * CIM interrupt handler.
4041 static void cim_intr_handler(struct adapter *adapter)
4043 static const struct intr_info cim_intr_info[] = {
4044 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4045 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4046 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4047 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4048 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4049 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4050 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4053 static const struct intr_info cim_upintr_info[] = {
4054 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4055 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4056 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4057 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4058 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4059 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4060 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4061 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4062 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4063 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4064 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4065 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4066 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4067 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4068 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4069 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4070 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4071 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4072 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4073 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4074 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4075 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4076 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4077 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4078 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4079 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4080 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4081 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4086 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
4087 t4_report_fw_error(adapter);
4089 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4091 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4094 t4_fatal_err(adapter);
4098 * ULP RX interrupt handler.
4100 static void ulprx_intr_handler(struct adapter *adapter)
4102 static const struct intr_info ulprx_intr_info[] = {
4103 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4104 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4105 { 0x7fffff, "ULPRX parity error", -1, 1 },
4109 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4110 t4_fatal_err(adapter);
4114 * ULP TX interrupt handler.
4116 static void ulptx_intr_handler(struct adapter *adapter)
4118 static const struct intr_info ulptx_intr_info[] = {
4119 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4121 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4123 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4125 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4127 { 0xfffffff, "ULPTX parity error", -1, 1 },
4131 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4132 t4_fatal_err(adapter);
4136 * PM TX interrupt handler.
4138 static void pmtx_intr_handler(struct adapter *adapter)
4140 static const struct intr_info pmtx_intr_info[] = {
4141 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4142 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4143 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4144 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4145 { 0xffffff0, "PMTX framing error", -1, 1 },
4146 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4147 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4149 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4150 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4154 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4155 t4_fatal_err(adapter);
4159 * PM RX interrupt handler.
4161 static void pmrx_intr_handler(struct adapter *adapter)
4163 static const struct intr_info pmrx_intr_info[] = {
4164 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4165 { 0x3ffff0, "PMRX framing error", -1, 1 },
4166 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4167 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4169 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4170 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4174 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4175 t4_fatal_err(adapter);
4179 * CPL switch interrupt handler.
4181 static void cplsw_intr_handler(struct adapter *adapter)
4183 static const struct intr_info cplsw_intr_info[] = {
4184 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4185 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4186 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4187 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4188 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4189 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4193 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4194 t4_fatal_err(adapter);
4198 * LE interrupt handler.
4200 static void le_intr_handler(struct adapter *adap)
4202 unsigned int chip_ver = chip_id(adap);
4203 static const struct intr_info le_intr_info[] = {
4204 { F_LIPMISS, "LE LIP miss", -1, 0 },
4205 { F_LIP0, "LE 0 LIP error", -1, 0 },
4206 { F_PARITYERR, "LE parity error", -1, 1 },
4207 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4208 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4212 static const struct intr_info t6_le_intr_info[] = {
4213 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4214 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4215 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4216 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4217 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4221 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4222 (chip_ver <= CHELSIO_T5) ?
4223 le_intr_info : t6_le_intr_info))
4228 * MPS interrupt handler.
4230 static void mps_intr_handler(struct adapter *adapter)
4232 static const struct intr_info mps_rx_intr_info[] = {
4233 { 0xffffff, "MPS Rx parity error", -1, 1 },
4236 static const struct intr_info mps_tx_intr_info[] = {
4237 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4238 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4239 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4241 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4243 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4244 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4245 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4248 static const struct intr_info mps_trc_intr_info[] = {
4249 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4250 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4252 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4255 static const struct intr_info mps_stat_sram_intr_info[] = {
4256 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4259 static const struct intr_info mps_stat_tx_intr_info[] = {
4260 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4263 static const struct intr_info mps_stat_rx_intr_info[] = {
4264 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4267 static const struct intr_info mps_cls_intr_info[] = {
4268 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4269 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4270 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4276 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4278 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4280 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4281 mps_trc_intr_info) +
4282 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4283 mps_stat_sram_intr_info) +
4284 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4285 mps_stat_tx_intr_info) +
4286 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4287 mps_stat_rx_intr_info) +
4288 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4291 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4292 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4294 t4_fatal_err(adapter);
4297 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4301 * EDC/MC interrupt handler.
4303 static void mem_intr_handler(struct adapter *adapter, int idx)
4305 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4307 unsigned int addr, cnt_addr, v;
4309 if (idx <= MEM_EDC1) {
4310 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4311 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4312 } else if (idx == MEM_MC) {
4313 if (is_t4(adapter)) {
4314 addr = A_MC_INT_CAUSE;
4315 cnt_addr = A_MC_ECC_STATUS;
4317 addr = A_MC_P_INT_CAUSE;
4318 cnt_addr = A_MC_P_ECC_STATUS;
4321 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4322 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4325 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4326 if (v & F_PERR_INT_CAUSE)
4327 CH_ALERT(adapter, "%s FIFO parity error\n",
4329 if (v & F_ECC_CE_INT_CAUSE) {
4330 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4332 t4_edc_err_read(adapter, idx);
4334 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4335 CH_WARN_RATELIMIT(adapter,
4336 "%u %s correctable ECC data error%s\n",
4337 cnt, name[idx], cnt > 1 ? "s" : "");
4339 if (v & F_ECC_UE_INT_CAUSE)
4341 "%s uncorrectable ECC data error\n", name[idx]);
4343 t4_write_reg(adapter, addr, v);
4344 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4345 t4_fatal_err(adapter);
4349 * MA interrupt handler.
4351 static void ma_intr_handler(struct adapter *adapter)
4353 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4355 if (status & F_MEM_PERR_INT_CAUSE) {
4357 "MA parity error, parity status %#x\n",
4358 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4361 "MA parity error, parity status %#x\n",
4362 t4_read_reg(adapter,
4363 A_MA_PARITY_ERROR_STATUS2));
4365 if (status & F_MEM_WRAP_INT_CAUSE) {
4366 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4367 CH_ALERT(adapter, "MA address wrap-around error by "
4368 "client %u to address %#x\n",
4369 G_MEM_WRAP_CLIENT_NUM(v),
4370 G_MEM_WRAP_ADDRESS(v) << 4);
4372 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4373 t4_fatal_err(adapter);
4377 * SMB interrupt handler.
4379 static void smb_intr_handler(struct adapter *adap)
4381 static const struct intr_info smb_intr_info[] = {
4382 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4383 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4384 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4388 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4393 * NC-SI interrupt handler.
4395 static void ncsi_intr_handler(struct adapter *adap)
4397 static const struct intr_info ncsi_intr_info[] = {
4398 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4399 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4400 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4401 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4405 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4410 * XGMAC interrupt handler.
4412 static void xgmac_intr_handler(struct adapter *adap, int port)
4414 u32 v, int_cause_reg;
4417 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4419 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4421 v = t4_read_reg(adap, int_cause_reg);
4423 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4427 if (v & F_TXFIFO_PRTY_ERR)
4428 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4430 if (v & F_RXFIFO_PRTY_ERR)
4431 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4433 t4_write_reg(adap, int_cause_reg, v);
4438 * PL interrupt handler.
4440 static void pl_intr_handler(struct adapter *adap)
4442 static const struct intr_info pl_intr_info[] = {
4443 { F_FATALPERR, "Fatal parity error", -1, 1 },
4444 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4448 static const struct intr_info t5_pl_intr_info[] = {
4449 { F_FATALPERR, "Fatal parity error", -1, 1 },
4453 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4455 pl_intr_info : t5_pl_intr_info))
4459 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4462 * t4_slow_intr_handler - control path interrupt handler
4463 * @adapter: the adapter
4465 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4466 * The designation 'slow' is because it involves register reads, while
4467 * data interrupts typically don't involve any MMIOs.
4469 int t4_slow_intr_handler(struct adapter *adapter)
4471 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4473 if (!(cause & GLBL_INTR_MASK))
4476 cim_intr_handler(adapter);
4478 mps_intr_handler(adapter);
4480 ncsi_intr_handler(adapter);
4482 pl_intr_handler(adapter);
4484 smb_intr_handler(adapter);
4486 xgmac_intr_handler(adapter, 0);
4488 xgmac_intr_handler(adapter, 1);
4490 xgmac_intr_handler(adapter, 2);
4492 xgmac_intr_handler(adapter, 3);
4494 pcie_intr_handler(adapter);
4496 mem_intr_handler(adapter, MEM_MC);
4497 if (is_t5(adapter) && (cause & F_MC1))
4498 mem_intr_handler(adapter, MEM_MC1);
4500 mem_intr_handler(adapter, MEM_EDC0);
4502 mem_intr_handler(adapter, MEM_EDC1);
4504 le_intr_handler(adapter);
4506 tp_intr_handler(adapter);
4508 ma_intr_handler(adapter);
4509 if (cause & F_PM_TX)
4510 pmtx_intr_handler(adapter);
4511 if (cause & F_PM_RX)
4512 pmrx_intr_handler(adapter);
4513 if (cause & F_ULP_RX)
4514 ulprx_intr_handler(adapter);
4515 if (cause & F_CPL_SWITCH)
4516 cplsw_intr_handler(adapter);
4518 sge_intr_handler(adapter);
4519 if (cause & F_ULP_TX)
4520 ulptx_intr_handler(adapter);
4522 /* Clear the interrupts just processed for which we are the master. */
4523 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4524 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4529 * t4_intr_enable - enable interrupts
4530 * @adapter: the adapter whose interrupts should be enabled
4532 * Enable PF-specific interrupts for the calling function and the top-level
4533 * interrupt concentrator for global interrupts. Interrupts are already
4534 * enabled at each module, here we just enable the roots of the interrupt
4537 * Note: this function should be called only when the driver manages
4538 * non PF-specific interrupts from the various HW modules. Only one PCI
4539 * function at a time should be doing this.
4541 void t4_intr_enable(struct adapter *adapter)
4544 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4545 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4546 ? G_SOURCEPF(whoami)
4547 : G_T6_SOURCEPF(whoami));
4549 if (chip_id(adapter) <= CHELSIO_T5)
4550 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4552 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4553 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4554 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4555 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4556 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4557 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4558 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4559 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4560 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4561 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4565 * t4_intr_disable - disable interrupts
4566 * @adapter: the adapter whose interrupts should be disabled
4568 * Disable interrupts. We only disable the top-level interrupt
4569 * concentrators. The caller must be a PCI function managing global
4572 void t4_intr_disable(struct adapter *adapter)
4574 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4575 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4576 ? G_SOURCEPF(whoami)
4577 : G_T6_SOURCEPF(whoami));
4579 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4580 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4584 * t4_intr_clear - clear all interrupts
4585 * @adapter: the adapter whose interrupts should be cleared
4587 * Clears all interrupts. The caller must be a PCI function managing
4588 * global interrupts.
4590 void t4_intr_clear(struct adapter *adapter)
4592 static const unsigned int cause_reg[] = {
4593 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4594 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4595 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4596 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4597 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4598 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4600 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4601 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4602 A_MPS_RX_PERR_INT_CAUSE,
4604 MYPF_REG(A_PL_PF_INT_CAUSE),
4611 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4612 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4614 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4615 A_MC_P_INT_CAUSE, 0xffffffff);
4617 if (is_t4(adapter)) {
4618 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4620 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4623 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4625 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4626 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4630 * hash_mac_addr - return the hash value of a MAC address
4631 * @addr: the 48-bit Ethernet MAC address
4633 * Hashes a MAC address according to the hash function used by HW inexact
4634 * (hash) address matching.
4636 static int hash_mac_addr(const u8 *addr)
4638 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4639 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4647 * t4_config_rss_range - configure a portion of the RSS mapping table
4648 * @adapter: the adapter
4649 * @mbox: mbox to use for the FW command
4650 * @viid: virtual interface whose RSS subtable is to be written
4651 * @start: start entry in the table to write
4652 * @n: how many table entries to write
4653 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4654 * @nrspq: number of values in @rspq
4656 * Programs the selected part of the VI's RSS mapping table with the
4657 * provided values. If @nrspq < @n the supplied values are used repeatedly
4658 * until the full table range is populated.
4660 * The caller must ensure the values in @rspq are in the range allowed for
4663 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4664 int start, int n, const u16 *rspq, unsigned int nrspq)
4667 const u16 *rsp = rspq;
4668 const u16 *rsp_end = rspq + nrspq;
4669 struct fw_rss_ind_tbl_cmd cmd;
4671 memset(&cmd, 0, sizeof(cmd));
4672 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4673 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4674 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4675 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4678 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4679 * Queue Identifiers. These Ingress Queue IDs are packed three to
4680 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4684 int nq = min(n, 32);
4686 __be32 *qp = &cmd.iq0_to_iq2;
4689 * Set up the firmware RSS command header to send the next
4690 * "nq" Ingress Queue IDs to the firmware.
4692 cmd.niqid = cpu_to_be16(nq);
4693 cmd.startidx = cpu_to_be16(start);
4696 * "nq" more done for the start of the next loop.
4702 * While there are still Ingress Queue IDs to stuff into the
4703 * current firmware RSS command, retrieve them from the
4704 * Ingress Queue ID array and insert them into the command.
4708 * Grab up to the next 3 Ingress Queue IDs (wrapping
4709 * around the Ingress Queue ID array if necessary) and
4710 * insert them into the firmware RSS command at the
4711 * current 3-tuple position within the commad.
4715 int nqbuf = min(3, nq);
4718 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4719 while (nqbuf && nq_packed < 32) {
4726 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4727 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4728 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4732 * Send this portion of the RRS table update to the firmware;
4733 * bail out on any errors.
4735 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4743 * t4_config_glbl_rss - configure the global RSS mode
4744 * @adapter: the adapter
4745 * @mbox: mbox to use for the FW command
4746 * @mode: global RSS mode
4747 * @flags: mode-specific flags
4749 * Sets the global RSS mode.
4751 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4754 struct fw_rss_glb_config_cmd c;
4756 memset(&c, 0, sizeof(c));
4757 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4758 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4759 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4760 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4761 c.u.manual.mode_pkd =
4762 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4763 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4764 c.u.basicvirtual.mode_keymode =
4765 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4766 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4769 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4773 * t4_config_vi_rss - configure per VI RSS settings
4774 * @adapter: the adapter
4775 * @mbox: mbox to use for the FW command
4778 * @defq: id of the default RSS queue for the VI.
4779 * @skeyidx: RSS secret key table index for non-global mode
4780 * @skey: RSS vf_scramble key for VI.
4782 * Configures VI-specific RSS properties.
4784 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4785 unsigned int flags, unsigned int defq, unsigned int skeyidx,
4788 struct fw_rss_vi_config_cmd c;
4790 memset(&c, 0, sizeof(c));
4791 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4792 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4793 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4794 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4795 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4796 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4797 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4798 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4799 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4801 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4804 /* Read an RSS table row */
4805 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4807 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4808 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4813 * t4_read_rss - read the contents of the RSS mapping table
4814 * @adapter: the adapter
4815 * @map: holds the contents of the RSS mapping table
4817 * Reads the contents of the RSS hash->queue mapping table.
4819 int t4_read_rss(struct adapter *adapter, u16 *map)
4824 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4825 ret = rd_rss_row(adapter, i, &val);
4828 *map++ = G_LKPTBLQUEUE0(val);
4829 *map++ = G_LKPTBLQUEUE1(val);
4835 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4836 * @adap: the adapter
4837 * @vals: where the indirect register values are stored/written
4838 * @nregs: how many indirect registers to read/write
4839 * @start_idx: index of first indirect register to read/write
4840 * @rw: Read (1) or Write (0)
4842 * Access TP PIO registers through LDST
4844 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4845 unsigned int start_index, unsigned int rw)
4848 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4849 struct fw_ldst_cmd c;
4851 for (i = 0 ; i < nregs; i++) {
4852 memset(&c, 0, sizeof(c));
4853 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4855 (rw ? F_FW_CMD_READ :
4857 V_FW_LDST_CMD_ADDRSPACE(cmd));
4858 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4860 c.u.addrval.addr = cpu_to_be32(start_index + i);
4861 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4862 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4865 vals[i] = be32_to_cpu(c.u.addrval.val);
4871 * t4_read_rss_key - read the global RSS key
4872 * @adap: the adapter
4873 * @key: 10-entry array holding the 320-bit RSS key
4875 * Reads the global 320-bit RSS key.
4877 void t4_read_rss_key(struct adapter *adap, u32 *key)
4879 if (t4_use_ldst(adap))
4880 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
4882 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4883 A_TP_RSS_SECRET_KEY0);
4887 * t4_write_rss_key - program one of the RSS keys
4888 * @adap: the adapter
4889 * @key: 10-entry array holding the 320-bit RSS key
4890 * @idx: which RSS key to write
4892 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4893 * 0..15 the corresponding entry in the RSS key table is written,
4894 * otherwise the global RSS key is written.
4896 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4898 u8 rss_key_addr_cnt = 16;
4899 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4902 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4903 * allows access to key addresses 16-63 by using KeyWrAddrX
4904 * as index[5:4](upper 2) into key table
4906 if ((chip_id(adap) > CHELSIO_T5) &&
4907 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4908 rss_key_addr_cnt = 32;
4910 if (t4_use_ldst(adap))
4911 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4913 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4914 A_TP_RSS_SECRET_KEY0);
4916 if (idx >= 0 && idx < rss_key_addr_cnt) {
4917 if (rss_key_addr_cnt > 16)
4918 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4919 vrt | V_KEYWRADDRX(idx >> 4) |
4920 V_T6_VFWRADDR(idx) | F_KEYWREN);
4922 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4923 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
4928 * t4_read_rss_pf_config - read PF RSS Configuration Table
4929 * @adapter: the adapter
4930 * @index: the entry in the PF RSS table to read
4931 * @valp: where to store the returned value
4933 * Reads the PF RSS Configuration Table at the specified index and returns
4934 * the value found there.
4936 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4939 if (t4_use_ldst(adapter))
4940 t4_fw_tp_pio_rw(adapter, valp, 1,
4941 A_TP_RSS_PF0_CONFIG + index, 1);
4943 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4944 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4948 * t4_write_rss_pf_config - write PF RSS Configuration Table
4949 * @adapter: the adapter
4950 * @index: the entry in the VF RSS table to read
4951 * @val: the value to store
4953 * Writes the PF RSS Configuration Table at the specified index with the
4956 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4959 if (t4_use_ldst(adapter))
4960 t4_fw_tp_pio_rw(adapter, &val, 1,
4961 A_TP_RSS_PF0_CONFIG + index, 0);
4963 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4964 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4968 * t4_read_rss_vf_config - read VF RSS Configuration Table
4969 * @adapter: the adapter
4970 * @index: the entry in the VF RSS table to read
4971 * @vfl: where to store the returned VFL
4972 * @vfh: where to store the returned VFH
4974 * Reads the VF RSS Configuration Table at the specified index and returns
4975 * the (VFL, VFH) values found there.
4977 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4980 u32 vrt, mask, data;
4982 if (chip_id(adapter) <= CHELSIO_T5) {
4983 mask = V_VFWRADDR(M_VFWRADDR);
4984 data = V_VFWRADDR(index);
4986 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4987 data = V_T6_VFWRADDR(index);
4990 * Request that the index'th VF Table values be read into VFL/VFH.
4992 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4993 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4994 vrt |= data | F_VFRDEN;
4995 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4998 * Grab the VFL/VFH values ...
5000 if (t4_use_ldst(adapter)) {
5001 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
5002 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
5004 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5005 vfl, 1, A_TP_RSS_VFL_CONFIG);
5006 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5007 vfh, 1, A_TP_RSS_VFH_CONFIG);
5012 * t4_write_rss_vf_config - write VF RSS Configuration Table
5014 * @adapter: the adapter
5015 * @index: the entry in the VF RSS table to write
5016 * @vfl: the VFL to store
5017 * @vfh: the VFH to store
5019 * Writes the VF RSS Configuration Table at the specified index with the
5020 * specified (VFL, VFH) values.
5022 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5025 u32 vrt, mask, data;
5027 if (chip_id(adapter) <= CHELSIO_T5) {
5028 mask = V_VFWRADDR(M_VFWRADDR);
5029 data = V_VFWRADDR(index);
5031 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5032 data = V_T6_VFWRADDR(index);
5036 * Load up VFL/VFH with the values to be written ...
5038 if (t4_use_ldst(adapter)) {
5039 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
5040 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
5042 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5043 &vfl, 1, A_TP_RSS_VFL_CONFIG);
5044 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5045 &vfh, 1, A_TP_RSS_VFH_CONFIG);
5049 * Write the VFL/VFH into the VF Table at index'th location.
5051 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5052 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5053 vrt |= data | F_VFRDEN;
5054 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5058 * t4_read_rss_pf_map - read PF RSS Map
5059 * @adapter: the adapter
5061 * Reads the PF RSS Map register and returns its value.
5063 u32 t4_read_rss_pf_map(struct adapter *adapter)
5067 if (t4_use_ldst(adapter))
5068 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
5070 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5071 &pfmap, 1, A_TP_RSS_PF_MAP);
5076 * t4_write_rss_pf_map - write PF RSS Map
5077 * @adapter: the adapter
5078 * @pfmap: PF RSS Map value
5080 * Writes the specified value to the PF RSS Map register.
5082 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
5084 if (t4_use_ldst(adapter))
5085 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
5087 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5088 &pfmap, 1, A_TP_RSS_PF_MAP);
5092 * t4_read_rss_pf_mask - read PF RSS Mask
5093 * @adapter: the adapter
5095 * Reads the PF RSS Mask register and returns its value.
5097 u32 t4_read_rss_pf_mask(struct adapter *adapter)
5101 if (t4_use_ldst(adapter))
5102 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
5104 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5105 &pfmask, 1, A_TP_RSS_PF_MSK);
5110 * t4_write_rss_pf_mask - write PF RSS Mask
5111 * @adapter: the adapter
5112 * @pfmask: PF RSS Mask value
5114 * Writes the specified value to the PF RSS Mask register.
/* NOTE(review): braces and `else` elided in this view. */
5116 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
/* FW LDST write (last arg 0) when usable, else TP PIO indirect write. */
5118 if (t4_use_ldst(adapter))
5119 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
5121 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5122 &pfmask, 1, A_TP_RSS_PF_MSK);
5126 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5127 * @adap: the adapter
5128 * @v4: holds the TCP/IP counter values
5129 * @v6: holds the TCP/IPv6 counter values
5131 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5132 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5134 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5135 struct tp_tcp_stats *v6)
/* Scratch buffer sized to cover the contiguous OUT_RST..RXT_SEG_LO range. */
5137 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
/* Helper macros index into `val` by MIB register offset; STAT64 joins the
 * HI/LO 32-bit counter halves into one u64. */
5139 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5140 #define STAT(x) val[STAT_IDX(x)]
5141 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
/* NOTE(review): the `if (v4)` / `if (v6)` NULL guards implied by the doc
 * comment are on lines elided from this view — confirm before editing. */
5144 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5145 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
5146 v4->tcp_out_rsts = STAT(OUT_RST);
5147 v4->tcp_in_segs = STAT64(IN_SEG);
5148 v4->tcp_out_segs = STAT64(OUT_SEG);
5149 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Same layout again, but starting at the IPv6 MIB base register. */
5152 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5153 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
5154 v6->tcp_out_rsts = STAT(OUT_RST);
5155 v6->tcp_in_segs = STAT64(IN_SEG);
5156 v6->tcp_out_segs = STAT64(OUT_SEG);
5157 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5165 * t4_tp_get_err_stats - read TP's error MIB counters
5166 * @adap: the adapter
5167 * @st: holds the counter values
5169 * Returns the values of TP's error counters.
5171 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
/* One counter per channel; nchan comes from the chip parameters (2 or 4). */
5173 int nchan = adap->chip_params->nchan;
/* Each call pulls `nchan` consecutive per-channel counters from the TP MIB
 * through the MIB_INDEX/MIB_DATA indirect pair. */
5175 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5176 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
5177 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5178 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
5179 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5180 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
5181 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5182 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
5183 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5184 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
5185 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5186 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
5187 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5188 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
5189 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5190 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
/* Final read is a fixed pair (not per-channel): ARP-drop plus the counter
 * that follows it in the MIB (presumably ofld_no_neigh/ofld_cong_defer). */
5192 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5193 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
5197 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5198 * @adap: the adapter
5199 * @st: holds the counter values
5201 * Returns the values of TP's proxy counters.
5203 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
5205 int nchan = adap->chip_params->nchan;
/* One proxy/loopback counter per channel, read via the TP MIB indirect pair. */
5207 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
5208 nchan, A_TP_MIB_TNL_LPBK_0);
5212 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5213 * @adap: the adapter
5214 * @st: holds the counter values
5216 * Returns the values of TP's CPL counters.
5218 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
5220 int nchan = adap->chip_params->nchan;
/* Per-channel CPL request and response counters from the TP MIB. */
5222 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
5223 nchan, A_TP_MIB_CPL_IN_REQ_0);
5224 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
5225 nchan, A_TP_MIB_CPL_OUT_RSP_0);
5229 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5230 * @adap: the adapter
5231 * @st: holds the counter values
5233 * Returns the values of TP's RDMA counters.
5235 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
/* Reads 2 consecutive MIB counters starting at RQE_DFR_PKT into the first
 * two fields of @st (rqe_dfr_pkt and the one that follows it). */
5237 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5238 2, A_TP_MIB_RQE_DFR_PKT);
5242 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5243 * @adap: the adapter
5244 * @idx: the port index
5245 * @st: holds the counter values
5247 * Returns the values of TP's FCoE counters for the selected port.
5249 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5250 struct tp_fcoe_stats *st)
/* Per-port counters live at base + idx; the byte counter is a HI/LO pair,
 * hence the 2*idx stride on the last read. */
5254 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5255 1, A_TP_MIB_FCOE_DDP_0 + idx);
5256 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5257 1, A_TP_MIB_FCOE_DROP_0 + idx);
5258 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5259 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
/* val[0] = HI word, val[1] = LO word of the 64-bit DDP byte count. */
5260 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5264 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5265 * @adap: the adapter
5266 * @st: holds the counter values
5268 * Returns the values of TP's counters for non-TCP directly-placed packets.
5270 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
/* Read 4 consecutive MIB words: frames, (one word elided from this view —
 * presumably drops), then the HI/LO halves of the octet count. */
5274 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5276 st->frames = val[0];
5278 st->octets = ((u64)val[2] << 32) | val[3];
5282 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5283 * @adap: the adapter
5284 * @mtus: where to store the MTU values
5285 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5287 * Reads the HW path MTU table.
5289 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5294 for (i = 0; i < NMTUS; ++i) {
/* MTUINDEX=0xff selects read-back mode; the low bits select entry i
 * (NOTE(review): entry index is passed via V_MTUVALUE here — confirm this
 * matches the MTU_TABLE read protocol in the data book). */
5295 t4_write_reg(adap, A_TP_MTU_TABLE,
5296 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5297 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5298 mtus[i] = G_MTUVALUE(v);
/* The `if (mtu_log)` guard for the optional array is elided in this view. */
5300 mtu_log[i] = G_MTUWIDTH(v);
5305 * t4_read_cong_tbl - reads the congestion control table
5306 * @adap: the adapter
5307 * @incr: where to store the alpha values
5309 * Reads the additive increments programmed into the HW congestion
5312 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5314 unsigned int mtu, w;
5316 for (mtu = 0; mtu < NMTUS; ++mtu)
5317 for (w = 0; w < NCCTRL_WIN; ++w) {
/* ROWINDEX=0xffff puts the table in read-back mode; (mtu<<5)|w selects
 * the (MTU, congestion-window) cell. */
5318 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5319 V_ROWINDEX(0xffff) | (mtu << 5) | w);
/* Only the low 13 bits hold the additive increment. */
5320 incr[mtu][w] = (u16)t4_read_reg(adap,
5321 A_TP_CCTRL_TABLE) & 0x1fff;
5326 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5327 * @adap: the adapter
5328 * @addr: the indirect TP register address
5329 * @mask: specifies the field within the register to modify
5330 * @val: new value for the field
5332 * Sets a field of an indirect TP register to the given value.
5334 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5335 unsigned int mask, unsigned int val)
/* Classic read-modify-write through the TP PIO indirect window: select the
 * register, merge @val into the masked field, write back.  Not atomic —
 * callers must serialize access to the PIO window themselves. */
5337 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5338 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5339 t4_write_reg(adap, A_TP_PIO_DATA, val);
5343 * init_cong_ctrl - initialize congestion control parameters
5344 * @a: the alpha values for congestion control
5345 * @b: the beta values for congestion control
5347 * Initialize the congestion control parameters.
5349 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): the assignments for a[9..] are elided from this view; only
 * the first and last runs of each table are visible below. */
5351 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5376 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5379 b[13] = b[14] = b[15] = b[16] = 3;
5380 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5381 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5386 /* The minimum additive increment value for the congestion control table */
5387 #define CC_MIN_INCR 2U
5390 * t4_load_mtus - write the MTU and congestion control HW tables
5391 * @adap: the adapter
5392 * @mtus: the values for the MTU table
5393 * @alpha: the values for the congestion control alpha parameter
5394 * @beta: the values for the congestion control beta parameter
5396 * Write the HW MTU table with the supplied MTUs and the high-speed
5397 * congestion control table with the supplied alpha, beta, and MTUs.
5398 * We write the two tables together because the additive increments
5399 * depend on the MTUs.
5401 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5402 const unsigned short *alpha, const unsigned short *beta)
/* Average packets per congestion window, one entry per NCCTRL_WIN window. */
5404 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5405 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5406 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5407 28672, 40960, 57344, 81920, 114688, 163840, 229376
5412 for (i = 0; i < NMTUS; ++i) {
5413 unsigned int mtu = mtus[i];
5414 unsigned int log2 = fls(mtu);
/* Round log2 to the nearest power of two (the adjustment statement on the
 * line following this test is elided from this view). */
5416 if (!(mtu & ((1 << log2) >> 2))) /* round */
5418 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5419 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5421 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Additive increment scaled by alpha and packets-per-window; the floor
 * argument (presumably CC_MIN_INCR) is on an elided line. */
5424 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5427 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5428 (w << 16) | (beta[w] << 13) | inc);
5434 * t4_set_pace_tbl - set the pace table
5435 * @adap: the adapter
5436 * @pace_vals: the pace values in microseconds
5437 * @start: index of the first entry in the HW pace table to set
5438 * @n: how many entries to set
5440 * Sets (a subset of the) HW pace table.
5442 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5443 unsigned int start, unsigned int n)
5445 unsigned int vals[NTX_SCHED], i;
/* 1000 dack ticks expressed in usec gives ns-per-tick for the conversion. */
5446 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5451 /* convert values from us to dack ticks, rounding to closest value */
5452 for (i = 0; i < n; i++, pace_vals++) {
5453 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
/* 0x7ff is the HW field maximum; the error-return bodies for both range
 * checks are on lines elided from this view. */
5454 if (vals[i] > 0x7ff)
5456 if (*pace_vals && vals[i] == 0)
5459 for (i = 0; i < n; i++, start++)
5460 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5465 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5466 * @adap: the adapter
5467 * @kbps: target rate in Kbps
5468 * @sched: the scheduler index
5470 * Configure a Tx HW scheduler for the target rate.
5472 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5474 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5475 unsigned int clk = adap->params.vpd.cclk * 1000;
5476 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustively search clocks-per-tick (cpt) / bytes-per-tick (bpt) pairs
 * for the combination whose achieved rate is closest to the target. */
5479 kbps *= 125; /* -> bytes */
5480 for (cpt = 1; cpt <= 255; cpt++) {
5482 bpt = (kbps + tps / 2) / tps;
5483 if (bpt > 0 && bpt <= 255) {
5485 delta = v >= kbps ? v - kbps : kbps - v;
5486 if (delta < mindelta) {
/* Once a candidate exists and bpt falls out of range, larger cpt can
 * only be worse, so the search stops (break is on an elided line). */
5491 } else if (selected_cpt)
/* Two schedulers share one 32-bit register; even/odd index picks the
 * high or low half (the even/odd test is on an elided line). */
5497 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5498 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5499 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5501 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5503 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5504 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5509 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5510 * @adap: the adapter
5511 * @sched: the scheduler index
5512 * @ipg: the interpacket delay in tenths of nanoseconds
5514 * Set the interpacket delay for a HW packet rate scheduler.
5516 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
/* Two schedulers per register; sched/2 selects the register. */
5518 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5520 /* convert ipg to nearest number of core clocks */
5521 ipg *= core_ticks_per_usec(adap);
5522 ipg = (ipg + 5000) / 10000;
/* Reject values that do not fit the TXTIMERSEPQ0 field (error return on an
 * elided line). */
5523 if (ipg > M_TXTIMERSEPQ0)
/* Read-modify-write the half of the register for this scheduler; the
 * odd/even `sched & 1` test sits on an elided line between these two. */
5526 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5527 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5529 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5531 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5532 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
5533 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5538 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5539 * clocks. The formula is
5541 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5543 * which is equivalent to
5545 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5547 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
/* cclk is in kHz, so v = bytes256 * ClkFreq_ms. */
5549 u64 v = bytes256 * adap->params.vpd.cclk;
/* 62.5 * v computed in integer arithmetic as 62*v + v/2. */
5551 return v * 62 + v / 2;
5555 * t4_get_chan_txrate - get the current per channel Tx rates
5556 * @adap: the adapter
5557 * @nic_rate: rates for NIC traffic
5558 * @ofld_rate: rates for offloaded traffic
5560 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5563 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* Tunnel (NIC) rates: channels 0/1 always, 2/3 only on 4-channel chips. */
5567 v = t4_read_reg(adap, A_TP_TX_TRATE);
5568 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5569 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5570 if (adap->chip_params->nchan > 2) {
5571 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5572 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
/* Offload rates follow the same per-channel pattern. */
5575 v = t4_read_reg(adap, A_TP_TX_ORATE);
5576 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5577 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5578 if (adap->chip_params->nchan > 2) {
5579 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5580 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5585 * t4_set_trace_filter - configure one of the tracing filters
5586 * @adap: the adapter
5587 * @tp: the desired trace filter parameters
5588 * @idx: which filter to configure
5589 * @enable: whether to enable or disable the filter
5591 * Configures one of the tracing filters available in HW. If @tp is %NULL
5592 * it indicates that the filter is already written in the register and it
5593 * just needs to be enabled or disabled.
5595 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5596 int idx, int enable)
5598 int i, ofst = idx * 4;
5599 u32 data_reg, mask_reg, cfg;
5600 u32 multitrc = F_TRCMULTIFILTER;
/* The enable bit moved between T4 and T5+ register layouts. */
5601 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5603 if (idx < 0 || idx >= NTRACE)
/* Enable/disable-only path: just toggle the enable bit in CTL_A. */
5606 if (tp == NULL || !enable) {
5607 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5613 * TODO - After T4 data book is updated, specify the exact
5616 * See T4 data book - MPS section for a complete description
5617 * of the below if..else handling of A_MPS_TRC_CFG register
5620 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5621 if (cfg & F_TRCMULTIFILTER) {
5623 * If multiple tracers are enabled, then maximum
5624 * capture size is 2.5KB (FIFO size of a single channel)
5625 * minus 2 flits for CPL_TRACE_PKT header.
/* NOTE(review): the bound below is 2560 - 16 words, i.e. 10KB/4; the prose
 * says 2.5KB — the units (bytes vs 4-byte words) should be confirmed
 * against the data book before changing anything here. */
5627 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5631 * If multiple tracers are disabled, to avoid deadlocks
5632 * maximum packet capture size of 9600 bytes is recommended.
5633 * Also in this mode, only trace0 can be enabled and running.
5636 if (tp->snap_len > 9600 || idx)
/* Validate the remaining parameters against their HW field widths. */
5640 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5641 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5642 tp->min_len > M_TFMINPKTSIZE)
5645 /* stop the tracer we'll be changing */
5646 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
/* Re-purpose idx as the byte offset of this filter's match/don't-care
 * register banks. */
5648 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5649 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5650 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
/* HW stores a don't-care mask, so the caller's care-mask is inverted. */
5652 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5653 t4_write_reg(adap, data_reg, tp->data[i]);
5654 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5656 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5657 V_TFCAPTUREMAX(tp->snap_len) |
5658 V_TFMINPKTSIZE(tp->min_len));
/* Final CTL_A write re-enables the tracer with the new parameters; the
 * is_t4() selector for the port/invert encoding is on an elided line. */
5659 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5660 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5662 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5663 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5669 * t4_get_trace_filter - query one of the tracing filters
5670 * @adap: the adapter
5671 * @tp: the current trace filter parameters
5672 * @idx: which trace filter to query
5673 * @enabled: non-zero if the filter is enabled
5675 * Returns the current settings of one of the HW tracing filters.
5677 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5681 int i, ofst = idx * 4;
5682 u32 data_reg, mask_reg;
5684 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5685 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
/* T4 vs T5+ use different bit layouts for enable/port/invert; the
 * is_t4()/else selector lines are elided from this view. */
5688 *enabled = !!(ctla & F_TFEN);
5689 tp->port = G_TFPORT(ctla);
5690 tp->invert = !!(ctla & F_TFINVERTMATCH);
5692 *enabled = !!(ctla & F_T5_TFEN);
5693 tp->port = G_T5_TFPORT(ctla);
5694 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5696 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5697 tp->min_len = G_TFMINPKTSIZE(ctlb);
5698 tp->skip_ofst = G_TFOFFSET(ctla);
5699 tp->skip_len = G_TFLENGTH(ctla);
5701 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5702 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5703 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
/* Undo the don't-care inversion done at write time, and mask the match
 * data so only cared-about bits are reported. */
5705 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5706 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5707 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5712 * t4_pmtx_get_stats - returns the HW stats from PMTX
5713 * @adap: the adapter
5714 * @cnt: where to store the count statistics
5715 * @cycles: where to store the cycle statistics
5717 * Returns performance statistics from PMTX.
5719 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Indexed readout: write i+1 to STAT_CONFIG to select the stat, then read
 * the count; cycle counters are 64-bit and are read differently per chip
 * (the is_t4()/else selector lines are elided from this view). */
5724 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5725 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5726 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5728 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5730 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5731 A_PM_TX_DBG_DATA, data, 2,
5732 A_PM_TX_DBG_STAT_MSB);
5733 cycles[i] = (((u64)data[0] << 32) | data[1]);
5739 * t4_pmrx_get_stats - returns the HW stats from PMRX
5740 * @adap: the adapter
5741 * @cnt: where to store the count statistics
5742 * @cycles: where to store the cycle statistics
5744 * Returns performance statistics from PMRX.
5746 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Mirror of t4_pmtx_get_stats for the Rx path memory: select stat i+1 via
 * STAT_CONFIG, read the count, then the 64-bit cycle counter (per-chip
 * branch lines elided from this view). */
5751 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5752 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5753 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5755 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5757 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5758 A_PM_RX_DBG_DATA, data, 2,
5759 A_PM_RX_DBG_STAT_MSB);
5760 cycles[i] = (((u64)data[0] << 32) | data[1]);
5766 * t4_get_mps_bg_map - return the buffer groups associated with a port
5767 * @adap: the adapter
5768 * @idx: the port index
5770 * Returns a bitmap indicating which MPS buffer groups are associated
5771 * with the given port. Bit i is set if buffer group i is used by the
5774 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5776 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
/* 1-port config: port 0 owns all 4 buffer groups. */
5779 return idx == 0 ? 0xf : 0;
/* 2-port T4/T5: each port owns a pair of adjacent buffer groups.  The
 * fall-through return for other configs is on an elided line. */
5780 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5781 return idx < 2 ? (3 << (2 * idx)) : 0;
5786 * t4_get_port_type_description - return Port Type string description
5787 * @port_type: firmware Port Type enumeration
5789 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* NOTE(review): the table entries and the out-of-range fallback return are
 * on lines elided from this view. */
5791 static const char *const port_type_description[] = {
5816 if (port_type < ARRAY_SIZE(port_type_description))
5817 return port_type_description[port_type];
5822 * t4_get_port_stats_offset - collect port stats relative to a previous
5824 * @adap: The adapter
5826 * @stats: Current stats to fill
5827 * @offset: Previous stats snapshot
5829 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5830 struct port_stats *stats,
5831 struct port_stats *offset)
/* Read absolute HW counters, then subtract the snapshot field-by-field by
 * treating both structs as flat u64 arrays (assumes port_stats is composed
 * entirely of u64 counters — TODO confirm against the struct definition).
 * The loop body (`*s -= *o` style) is on lines elided from this view. */
5836 t4_get_port_stats(adap, idx, stats);
5837 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5838 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5844 * t4_get_port_stats - collect port statistics
5845 * @adap: the adapter
5846 * @idx: the port index
5847 * @p: the stats structure to fill
5849 * Collect statistics related to the given port from HW.
5851 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
/* Buffer-group map decides which per-BG drop/truncate counters apply. */
5853 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* GET_STAT reads a 64-bit per-port MPS counter; register block layout
 * differs between T4 and T5+.  GET_STAT_COM reads a common (non-port)
 * MPS counter. */
5856 #define GET_STAT(name) \
5857 t4_read_reg64(adap, \
5858 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5859 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5860 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5862 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5864 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5865 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5866 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5867 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5868 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5869 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5870 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5871 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5872 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5873 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5874 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5875 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5876 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5877 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5878 p->tx_drop = GET_STAT(TX_PORT_DROP);
5879 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5880 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5881 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5882 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5883 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5884 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5885 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5886 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* On T5+, STAT_CTL may configure HW to include pause frames in the frame/
 * byte counters; back them out so stats exclude pause traffic (a pause
 * frame is 64 bytes). */
5888 if (chip_id(adap) >= CHELSIO_T5) {
5889 if (stat_ctl & F_COUNTPAUSESTATTX) {
5890 p->tx_frames -= p->tx_pause;
5891 p->tx_octets -= p->tx_pause * 64;
5893 if (stat_ctl & F_COUNTPAUSEMCTX)
5894 p->tx_mcast_frames -= p->tx_pause;
5897 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5898 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5899 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5900 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5901 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5902 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5903 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5904 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5905 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5906 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5907 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5908 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5909 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5910 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5911 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5912 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5913 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5914 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5915 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5916 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5917 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5918 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5919 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5920 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5921 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5922 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5923 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame back-out on the Rx side. */
5925 if (chip_id(adap) >= CHELSIO_T5) {
5926 if (stat_ctl & F_COUNTPAUSESTATRX) {
5927 p->rx_frames -= p->rx_pause;
5928 p->rx_octets -= p->rx_pause * 64;
5930 if (stat_ctl & F_COUNTPAUSEMCRX)
5931 p->rx_mcast_frames -= p->rx_pause;
/* Per-buffer-group drop/truncate counters only when the BG belongs to
 * this port; otherwise report 0. */
5934 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5935 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5936 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5937 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5938 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5939 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5940 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5941 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5948 * t4_get_lb_stats - collect loopback port statistics
5949 * @adap: the adapter
5950 * @idx: the loopback port index
5951 * @p: the stats structure to fill
5953 * Return HW statistics for the given loopback port.
5955 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5957 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Same register-layout split as t4_get_port_stats, but for the LB_PORT
 * counter bank. */
5959 #define GET_STAT(name) \
5960 t4_read_reg64(adap, \
5962 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5963 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5964 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5966 p->octets = GET_STAT(BYTES);
5967 p->frames = GET_STAT(FRAMES);
5968 p->bcast_frames = GET_STAT(BCAST);
5969 p->mcast_frames = GET_STAT(MCAST);
5970 p->ucast_frames = GET_STAT(UCAST);
5971 p->error_frames = GET_STAT(ERROR);
5973 p->frames_64 = GET_STAT(64B);
5974 p->frames_65_127 = GET_STAT(65B_127B);
5975 p->frames_128_255 = GET_STAT(128B_255B);
5976 p->frames_256_511 = GET_STAT(256B_511B);
5977 p->frames_512_1023 = GET_STAT(512B_1023B);
5978 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5979 p->frames_1519_max = GET_STAT(1519B_MAX);
5980 p->drop = GET_STAT(DROP_FRAMES);
/* Per-buffer-group loopback drop/truncate counters gated by the BG map. */
5982 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5983 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5984 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5985 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5986 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5987 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5988 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5989 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5996 * t4_wol_magic_enable - enable/disable magic packet WoL
5997 * @adap: the adapter
5998 * @port: the physical port index
5999 * @addr: MAC address expected in magic packets, %NULL to disable
6001 * Enables/disables magic packet wake-on-LAN for the selected port.
6003 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
/* Pick the T4 (XGMAC) or T5+ (MAC) register block for this port; the
 * is_t4()/else selector lines are elided from this view. */
6006 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6009 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6010 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6011 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6013 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6014 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6015 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
/* When enabling, program the expected MAC: bytes 2..5 in the LO register,
 * bytes 0..1 in the HI register (the `if (addr)` guard is elided). */
6019 t4_write_reg(adap, mag_id_reg_l,
6020 (addr[2] << 24) | (addr[3] << 16) |
6021 (addr[4] << 8) | addr[5]);
6022 t4_write_reg(adap, mag_id_reg_h,
6023 (addr[0] << 8) | addr[1]);
/* MAGICEN follows addr != NULL: set to enable, clear to disable. */
6025 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6026 V_MAGICEN(addr != NULL));
6030 * t4_wol_pat_enable - enable/disable pattern-based WoL
6031 * @adap: the adapter
6032 * @port: the physical port index
6033 * @map: bitmap of which HW pattern filters to set
6034 * @mask0: byte mask for bytes 0-63 of a packet
6035 * @mask1: byte mask for bytes 64-127 of a packet
6036 * @crc: Ethernet CRC for selected bytes
6037 * @enable: enable/disable switch
6039 * Sets the pattern filters indicated in @map to mask out the bytes
6040 * specified in @mask0/@mask1 in received packets and compare the CRC of
6041 * the resulting packet against @crc. If @enable is %true pattern-based
6042 * WoL is enabled, otherwise disabled.
6044 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6045 u64 mask0, u64 mask1, unsigned int crc, bool enable)
/* T4 vs T5+ register block selection (selector lines elided). */
6051 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6053 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
/* Disable path: clear PATEN and return early (return elided). */
6056 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
/* EPIO register accessor, chip-dependent like the config register above. */
6062 #define EPIO_REG(name) \
6063 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6064 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
/* Upper mask words are common to all filters and loaded once. */
6066 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6067 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6068 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
/* Program each filter selected in @map: byte mask at EPIO address i, CRC
 * at address i+32.  The `if (!(map & 1)) continue;` skip and the -ETIMEDOUT
 * returns for the BUSY checks are on elided lines. */
6070 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6074 /* write byte masks */
6075 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6076 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6077 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6078 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6082 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6083 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6084 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6085 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
/* All filters programmed: turn on pattern-based WoL. */
6090 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6094 /* t4_mk_filtdelwr - create a delete filter WR
6095 * @ftid: the filter ID
6096 * @wr: the filter work request to populate
6097 * @qid: ingress queue to receive the delete notification
6099 * Creates a filter work request to delete the supplied filter. If @qid is
6100 * negative the delete notification is suppressed.
6102 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
/* Build the FW_FILTER_WR in place; all multi-byte fields are big-endian. */
6104 memset(wr, 0, sizeof(*wr));
6105 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6106 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
/* NOREPLY set when qid < 0 suppresses the delete notification. */
6107 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6108 V_FW_FILTER_WR_NOREPLY(qid < 0));
6109 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
/* Reply queue only filled in when a notification was requested (the
 * `if (qid >= 0)` guard is on an elided line). */
6111 wr->rx_chan_rx_rpl_iq =
6112 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/* Initialize a firmware command header: sets the opcode, the REQUEST flag,
 * the READ or WRITE flag (@rd_wr), and the command length in 16-byte units.
 * @var must be a struct whose first two be32 fields are op_to_write and
 * retval_len16. */
6115 #define INIT_CMD(var, cmd, rd_wr) do { \
6116 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
6117 F_FW_CMD_REQUEST | \
6118 F_FW_CMD_##rd_wr); \
6119 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
/* Write a value to an address in the firmware's address space via an
 * FW_LDST command on mailbox @mbox.  Returns the mailbox status
 * (0 on success). */
6122 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6126 struct fw_ldst_cmd c;
6128 memset(&c, 0, sizeof(c));
/* Address space FIRMWARE selects the FW-internal address/value form. */
6129 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6130 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6134 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6135 c.u.addrval.addr = cpu_to_be32(addr);
6136 c.u.addrval.val = cpu_to_be32(val);
/* No reply payload needed — pass NULL for the response buffer. */
6138 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6142 * t4_mdio_rd - read a PHY register through MDIO
6143 * @adap: the adapter
6144 * @mbox: mailbox to use for the FW command
6145 * @phy_addr: the PHY address
6146 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6147 * @reg: the register to read
6148 * @valp: where to store the value
6150 * Issues a FW command through the given mailbox to read a PHY register.
6152 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6153 unsigned int mmd, unsigned int reg, unsigned int *valp)
6157 struct fw_ldst_cmd c;
6159 memset(&c, 0, sizeof(c));
/* LDST command targeting the MDIO address space, flagged as a READ. */
6160 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6161 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6162 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6164 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6165 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6166 V_FW_LDST_CMD_MMD(mmd));
6167 c.u.mdio.raddr = cpu_to_be16(reg);
/* The command reply is written back into `c`; extract the 16-bit register
 * value on success (the `if (ret == 0)` guard is on an elided line). */
6169 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6171 *valp = be16_to_cpu(c.u.mdio.rval);
6176 * t4_mdio_wr - write a PHY register through MDIO
6177 * @adap: the adapter
6178 * @mbox: mailbox to use for the FW command
6179 * @phy_addr: the PHY address
6180 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6181 * @reg: the register to write
6182 * @valp: value to write
6184 * Issues a FW command through the given mailbox to write a PHY register.
6186 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6187 unsigned int mmd, unsigned int reg, unsigned int val)
6190 struct fw_ldst_cmd c;
6192 memset(&c, 0, sizeof(c));
/* Mirror of t4_mdio_rd but flagged as a WRITE and carrying the value. */
6193 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6194 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6195 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6197 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6198 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6199 V_FW_LDST_CMD_MMD(mmd));
6200 c.u.mdio.raddr = cpu_to_be16(reg);
6201 c.u.mdio.rval = cpu_to_be16(val);
/* No reply payload is needed for a write. */
6203 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6208 * t4_sge_decode_idma_state - decode the idma state
6209 * @adap: the adapter
6210 * @state: the state idma is stuck in
6212 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* State-name table for T4 chips; indexed directly by @state. */
6214 static const char * const t4_decode[] = {
6216 "IDMA_PUSH_MORE_CPL_FIFO",
6217 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6219 "IDMA_PHYSADDR_SEND_PCIEHDR",
6220 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6221 "IDMA_PHYSADDR_SEND_PAYLOAD",
6222 "IDMA_SEND_FIFO_TO_IMSG",
6223 "IDMA_FL_REQ_DATA_FL_PREP",
6224 "IDMA_FL_REQ_DATA_FL",
6226 "IDMA_FL_H_REQ_HEADER_FL",
6227 "IDMA_FL_H_SEND_PCIEHDR",
6228 "IDMA_FL_H_PUSH_CPL_FIFO",
6229 "IDMA_FL_H_SEND_CPL",
6230 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6231 "IDMA_FL_H_SEND_IP_HDR",
6232 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6233 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6234 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6235 "IDMA_FL_D_SEND_PCIEHDR",
6236 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6237 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6238 "IDMA_FL_SEND_PCIEHDR",
6239 "IDMA_FL_PUSH_CPL_FIFO",
6241 "IDMA_FL_SEND_PAYLOAD_FIRST",
6242 "IDMA_FL_SEND_PAYLOAD",
6243 "IDMA_FL_REQ_NEXT_DATA_FL",
6244 "IDMA_FL_SEND_NEXT_PCIEHDR",
6245 "IDMA_FL_SEND_PADDING",
6246 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6247 "IDMA_FL_SEND_FIFO_TO_IMSG",
6248 "IDMA_FL_REQ_DATAFL_DONE",
6249 "IDMA_FL_REQ_HEADERFL_DONE",
/* State-name table for T5 chips (state encoding differs from T4). */
6251 static const char * const t5_decode[] = {
6254 "IDMA_PUSH_MORE_CPL_FIFO",
6255 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6256 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6257 "IDMA_PHYSADDR_SEND_PCIEHDR",
6258 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6259 "IDMA_PHYSADDR_SEND_PAYLOAD",
6260 "IDMA_SEND_FIFO_TO_IMSG",
6261 "IDMA_FL_REQ_DATA_FL",
6263 "IDMA_FL_DROP_SEND_INC",
6264 "IDMA_FL_H_REQ_HEADER_FL",
6265 "IDMA_FL_H_SEND_PCIEHDR",
6266 "IDMA_FL_H_PUSH_CPL_FIFO",
6267 "IDMA_FL_H_SEND_CPL",
6268 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6269 "IDMA_FL_H_SEND_IP_HDR",
6270 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6271 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6272 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6273 "IDMA_FL_D_SEND_PCIEHDR",
6274 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6275 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6276 "IDMA_FL_SEND_PCIEHDR",
6277 "IDMA_FL_PUSH_CPL_FIFO",
6279 "IDMA_FL_SEND_PAYLOAD_FIRST",
6280 "IDMA_FL_SEND_PAYLOAD",
6281 "IDMA_FL_REQ_NEXT_DATA_FL",
6282 "IDMA_FL_SEND_NEXT_PCIEHDR",
6283 "IDMA_FL_SEND_PADDING",
6284 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* State-name table for T6 chips. */
6286 static const char * const t6_decode[] = {
6288 "IDMA_PUSH_MORE_CPL_FIFO",
6289 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6290 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6291 "IDMA_PHYSADDR_SEND_PCIEHDR",
6292 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6293 "IDMA_PHYSADDR_SEND_PAYLOAD",
6294 "IDMA_FL_REQ_DATA_FL",
6296 "IDMA_FL_DROP_SEND_INC",
6297 "IDMA_FL_H_REQ_HEADER_FL",
6298 "IDMA_FL_H_SEND_PCIEHDR",
6299 "IDMA_FL_H_PUSH_CPL_FIFO",
6300 "IDMA_FL_H_SEND_CPL",
6301 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6302 "IDMA_FL_H_SEND_IP_HDR",
6303 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6304 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6305 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6306 "IDMA_FL_D_SEND_PCIEHDR",
6307 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6308 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6309 "IDMA_FL_SEND_PCIEHDR",
6310 "IDMA_FL_PUSH_CPL_FIFO",
6312 "IDMA_FL_SEND_PAYLOAD_FIRST",
6313 "IDMA_FL_SEND_PAYLOAD",
6314 "IDMA_FL_REQ_NEXT_DATA_FL",
6315 "IDMA_FL_SEND_NEXT_PCIEHDR",
6316 "IDMA_FL_SEND_PADDING",
6317 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers dumped alongside the decoded state. */
6319 static const u32 sge_regs[] = {
6320 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6321 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6322 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6324 const char * const *sge_idma_decode;
6325 int sge_idma_decode_nstates;
6327 unsigned int chip_version = chip_id(adapter);
6329 /* Select the right set of decode strings to dump depending on the
6330 * adapter chip type.
6332 switch (chip_version) {
6334 sge_idma_decode = (const char * const *)t4_decode;
6335 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6339 sge_idma_decode = (const char * const *)t5_decode;
6340 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6344 sge_idma_decode = (const char * const *)t6_decode;
6345 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
/* Unknown chip: log an error (no decode table applies). */
6349 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
/* Print the symbolic state name when in range, else the raw number. */
6353 if (state < sge_idma_decode_nstates)
6354 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6356 CH_WARN(adapter, "idma state %d unknown\n", state);
/* Dump the SGE debug registers to aid post-mortem analysis. */
6358 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6359 CH_WARN(adapter, "SGE register %#x value %#x\n",
6360 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6364 * t4_sge_ctxt_flush - flush the SGE context cache
6365 * @adap: the adapter
6366 * @mbox: mailbox to use for the FW command
6368 * Issues a FW command through the given mailbox to flush the
6369 * SGE context cache.
6371 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6375 struct fw_ldst_cmd c;
6377 memset(&c, 0, sizeof(c));
/* FW_LDST command against the SGE egress-context address space. */
6378 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6379 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6380 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6382 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* Request the context-cache flush. */
6383 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6385 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6390 * t4_fw_hello - establish communication with FW
6391 * @adap: the adapter
6392 * @mbox: mailbox to use for the FW command
6393 * @evt_mbox: mailbox to receive async FW events
6394 * @master: specifies the caller's willingness to be the device master
6395 * @state: returns the current device state (if non-NULL)
6397 * Issues a command to establish communication with FW. Returns either
6398 * an error (negative integer) or the mailbox of the Master PF.
6400 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6401 enum dev_master master, enum dev_state *state)
6404 struct fw_hello_cmd c;
6406 unsigned int master_mbox;
6407 int retries = FW_CMD_HELLO_RETRIES;
6410 memset(&c, 0, sizeof(c));
6411 INIT_CMD(c, HELLO, WRITE);
/*
 * Encode our mastership preference: MASTERDIS if we can't be master,
 * MASTERFORCE if we must be, and our mailbox as the proposed master
 * mailbox only in the MASTER_MUST case.
 */
6412 c.err_to_clearinit = cpu_to_be32(
6413 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6414 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6415 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6416 mbox : M_FW_HELLO_CMD_MBMASTER) |
6417 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6418 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6419 F_FW_HELLO_CMD_CLEARINIT);
6422 * Issue the HELLO command to the firmware. If it's not successful
6423 * but indicates that we got a "busy" or "timeout" condition, retry
6424 * the HELLO until we exhaust our retry limit. If we do exceed our
6425 * retry limit, check to see if the firmware left us any error
6426 * information and report that if so ...
6428 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6429 if (ret != FW_SUCCESS) {
6430 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6432 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6433 t4_report_fw_error(adap)。
6437 v = be32_to_cpu(c.err_to_clearinit);
6438 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
/* Translate the HELLO reply flags into a device state for the caller. */
6440 if (v & F_FW_HELLO_CMD_ERR)
6441 *state = DEV_STATE_ERR;
6442 else if (v & F_FW_HELLO_CMD_INIT)
6443 *state = DEV_STATE_INIT;
6445 *state = DEV_STATE_UNINIT;
6449 * If we're not the Master PF then we need to wait around for the
6450 * Master PF Driver to finish setting up the adapter.
6452 * Note that we also do this wait if we're a non-Master-capable PF and
6453 * there is no current Master PF; a Master PF may show up momentarily
6454 * and we wouldn't want to fail pointlessly. (This can happen when an
6455 * OS loads lots of different drivers rapidly at the same time). In
6456 * this case, the Master PF returned by the firmware will be
6457 * M_PCIE_FW_MASTER so the test below will work ...
6459 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6460 master_mbox != mbox) {
6461 int waiting = FW_CMD_HELLO_TIMEOUT;
6464 * Wait for the firmware to either indicate an error or
6465 * initialized state. If we see either of these we bail out
6466 * and report the issue to the caller. If we exhaust the
6467 * "hello timeout" and we haven't exhausted our retries, try
6468 * again. Otherwise bail with a timeout error.
6477 * If neither Error nor Initialized are indicated
6478 * by the firmware keep waiting till we exhaust our
6479 * timeout ... and then retry if we haven't exhausted
6482 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6483 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6494 * We either have an Error or Initialized condition
6495 * report errors preferentially.
6498 if (pcie_fw & F_PCIE_FW_ERR)
6499 *state = DEV_STATE_ERR;
6500 else if (pcie_fw & F_PCIE_FW_INIT)
6501 *state = DEV_STATE_INIT;
6505 * If we arrived before a Master PF was selected and
6506 * there's not a valid Master PF, grab its identity
6509 if (master_mbox == M_PCIE_FW_MASTER &&
6510 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6511 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6520 * t4_fw_bye - end communication with FW
6521 * @adap: the adapter
6522 * @mbox: mailbox to use for the FW command
6524 * Issues a command to terminate communication with FW.
6526 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6528 struct fw_bye_cmd c;
6530 memset(&c, 0, sizeof(c));
/* Simple fire-and-forget BYE command; no reply payload expected. */
6531 INIT_CMD(c, BYE, WRITE);
6532 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6536 * t4_fw_reset - issue a reset to FW
6537 * @adap: the adapter
6538 * @mbox: mailbox to use for the FW command
6539 * @reset: specifies the type of reset to perform
6541 * Issues a reset command of the specified type to FW.
6543 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6545 struct fw_reset_cmd c;
6547 memset(&c, 0, sizeof(c));
6548 INIT_CMD(c, RESET, WRITE);
/* @reset carries the reset-type bits (e.g. PIORST/PIORSTMODE). */
6549 c.val = cpu_to_be32(reset);
6550 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6554 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6555 * @adap: the adapter
6556 * @mbox: mailbox to use for the FW RESET command (if desired)
6557 * @force: force uP into RESET even if FW RESET command fails
6559 * Issues a RESET command to firmware (if desired) with a HALT indication
6560 * and then puts the microprocessor into RESET state. The RESET command
6561 * will only be issued if a legitimate mailbox is provided (mbox <=
6562 * M_PCIE_FW_MASTER).
6564 * This is generally used in order for the host to safely manipulate the
6565 * adapter without fear of conflicting with whatever the firmware might
6566 * be doing. The only way out of this state is to RESTART the firmware
6569 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6574 * If a legitimate mailbox is provided, issue a RESET command
6575 * with a HALT indication.
6577 if (mbox <= M_PCIE_FW_MASTER) {
6578 struct fw_reset_cmd c;
6580 memset(&c, 0, sizeof(c));
6581 INIT_CMD(c, RESET, WRITE);
/* Request a PIO reset with the HALT flag so FW stops instead of restarting. */
6582 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6583 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6584 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6588 * Normally we won't complete the operation if the firmware RESET
6589 * command fails but if our caller insists we'll go ahead and put the
6590 * uP into RESET. This can be useful if the firmware is hung or even
6591 * missing ... We'll have to take the risk of putting the uP into
6592 * RESET without the cooperation of firmware in that case.
6594 * We also force the firmware's HALT flag to be on in case we bypassed
6595 * the firmware RESET command above or we're dealing with old firmware
6596 * which doesn't have the HALT capability. This will serve as a flag
6597 * for the incoming firmware to know that it's coming out of a HALT
6598 * rather than a RESET ... if it's new enough to understand that ...
6600 if (ret == 0 || force) {
/* Hold the microprocessor in RESET and latch the HALT flag in PCIE_FW. */
6601 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6602 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6607 * And we always return the result of the firmware RESET command
6608 * even when we force the uP into RESET ...
6614 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6615 * @adap: the adapter
6616 * @reset: if we want to do a RESET to restart things
6618 * Restart firmware previously halted by t4_fw_halt(). On successful
6619 * return the previous PF Master remains as the new PF Master and there
6620 * is no need to issue a new HELLO command, etc.
6622 * We do this in two ways:
6624 * 1. If we're dealing with newer firmware we'll simply want to take
6625 * the chip's microprocessor out of RESET. This will cause the
6626 * firmware to start up from its start vector. And then we'll loop
6627 * until the firmware indicates it's started again (PCIE_FW.HALT
6628 * reset to 0) or we timeout.
6630 * 2. If we're dealing with older firmware then we'll need to RESET
6631 * the chip since older firmware won't recognize the PCIE_FW.HALT
6632 * flag and automatically RESET itself on startup.
6634 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6638 * Since we're directing the RESET instead of the firmware
6639 * doing it automatically, we need to clear the PCIE_FW.HALT
6642 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6645 * If we've been given a valid mailbox, first try to get the
6646 * firmware to do the RESET. If that works, great and we can
6647 * return success. Otherwise, if we haven't been given a
6648 * valid mailbox or the RESET command failed, fall back to
6649 * hitting the chip with a hammer.
6651 if (mbox <= M_PCIE_FW_MASTER) {
/* Take the uP out of RESET so it can process the mailbox command. */
6652 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6654 if (t4_fw_reset(adap, mbox,
6655 F_PIORST | F_PIORSTMODE) == 0)
/* "Hammer" path: drive the PIO reset directly through PL_RST. */
6659 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6664 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
/* Poll PCIE_FW.HALT clearing, bounded by FW_CMD_MAX_TIMEOUT ms. */
6665 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6666 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6677 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6678 * @adap: the adapter
6679 * @mbox: mailbox to use for the FW RESET command (if desired)
6680 * @fw_data: the firmware image to write
6682 * @force: force upgrade even if firmware doesn't cooperate
6684 * Perform all of the steps necessary for upgrading an adapter's
6685 * firmware image. Normally this requires the cooperation of the
6686 * existing firmware in order to halt all existing activities
6687 * but if an invalid mailbox token is passed in we skip that step
6688 * (though we'll still put the adapter microprocessor into RESET in
6691 * On successful return the new firmware will have been loaded and
6692 * the adapter will have been fully RESET losing all previous setup
6693 * state. On unsuccessful return the adapter may be completely hosed ...
6694 * positive errno indicates that the adapter is ~probably~ intact, a
6695 * negative errno indicates that things are looking bad ...
6697 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6698 const u8 *fw_data, unsigned int size, int force)
6700 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Bootstrap images are flashed but never restarted by this routine. */
6701 unsigned int bootstrap =
6702 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
/* Refuse images built for a different chip. */
6705 if (!t4_fw_matches_chip(adap, fw_hdr))
/* Halt the running firmware first (honoring @force on failure). */
6709 ret = t4_fw_halt(adap, mbox, force);
6710 if (ret < 0 && !force)
6714 ret = t4_load_fw(adap, fw_data, size);
6715 if (ret < 0 || bootstrap)
6719 * Older versions of the firmware don't understand the new
6720 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6721 * restart. So for newly loaded older firmware we'll have to do the
6722 * RESET for it so it starts up on a clean slate. We can tell if
6723 * the newly loaded firmware will handle this right by checking
6724 * its header flags to see if it advertises the capability.
6726 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6727 return t4_fw_restart(adap, mbox, reset);
6731 * t4_fw_initialize - ask FW to initialize the device
6732 * @adap: the adapter
6733 * @mbox: mailbox to use for the FW command
6735 * Issues a command to FW to partially initialize the device. This
6736 * performs initialization that generally doesn't depend on user input.
6738 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6740 struct fw_initialize_cmd c;
6742 memset(&c, 0, sizeof(c));
/* Bare INITIALIZE command; no parameters, no reply payload. */
6743 INIT_CMD(c, INITIALIZE, WRITE);
6744 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6748 * t4_query_params_rw - query FW or device parameters
6749 * @adap: the adapter
6750 * @mbox: mailbox to use for the FW command
6753 * @nparams: the number of parameters
6754 * @params: the parameter names
6755 * @val: the parameter values
6756 * @rw: Write and read flag
6758 * Reads the value of FW or device parameters. Up to 7 parameters can be
6761 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6762 unsigned int vf, unsigned int nparams, const u32 *params,
6766 struct fw_params_cmd c;
/* p walks the mnem/val pairs inside the command. */
6767 __be32 *p = &c.param[0].mnem;
6772 memset(&c, 0, sizeof(c));
6773 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6774 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6775 V_FW_PARAMS_CMD_PFN(pf) |
6776 V_FW_PARAMS_CMD_VFN(vf));
6777 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Load the parameter mnemonics (and, for rw, the current values). */
6779 for (i = 0; i < nparams; i++) {
6780 *p++ = cpu_to_be32(*params++);
6782 *p = cpu_to_be32(*(val + i));
6786 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success copy each returned value out of the reply; values sit at
 * every other 32-bit word starting at param[0].val. */
6788 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6789 *val++ = be32_to_cpu(*p);
/* Convenience wrapper: query parameters with the read/write flag off. */
6793 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6794 unsigned int vf, unsigned int nparams, const u32 *params,
6797 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6801 * t4_set_params_timeout - sets FW or device parameters
6802 * @adap: the adapter
6803 * @mbox: mailbox to use for the FW command
6806 * @nparams: the number of parameters
6807 * @params: the parameter names
6808 * @val: the parameter values
6809 * @timeout: the timeout time
6811 * Sets the value of FW or device parameters. Up to 7 parameters can be
6812 * specified at once.
6814 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6815 unsigned int pf, unsigned int vf,
6816 unsigned int nparams, const u32 *params,
6817 const u32 *val, int timeout)
6819 struct fw_params_cmd c;
6820 __be32 *p = &c.param[0].mnem;
6825 memset(&c, 0, sizeof(c));
/* Same command as the query path but with the WRITE flag. */
6826 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6827 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6828 V_FW_PARAMS_CMD_PFN(pf) |
6829 V_FW_PARAMS_CMD_VFN(vf));
6830 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Interleave mnemonic/value pairs into the command body. */
6833 *p++ = cpu_to_be32(*params++);
6834 *p++ = cpu_to_be32(*val++);
/* Caller-supplied timeout; no reply payload needed. */
6837 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6841 * t4_set_params - sets FW or device parameters
6842 * @adap: the adapter
6843 * @mbox: mailbox to use for the FW command
6846 * @nparams: the number of parameters
6847 * @params: the parameter names
6848 * @val: the parameter values
6850 * Sets the value of FW or device parameters. Up to 7 parameters can be
6851 * specified at once.
/* Convenience wrapper: set parameters with the default command timeout. */
6853 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6854 unsigned int vf, unsigned int nparams, const u32 *params,
6857 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6858 FW_CMD_MAX_TIMEOUT);
6862 * t4_cfg_pfvf - configure PF/VF resource limits
6863 * @adap: the adapter
6864 * @mbox: mailbox to use for the FW command
6865 * @pf: the PF being configured
6866 * @vf: the VF being configured
6867 * @txq: the max number of egress queues
6868 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6869 * @rxqi: the max number of interrupt-capable ingress queues
6870 * @rxq: the max number of interruptless ingress queues
6871 * @tc: the PCI traffic class
6872 * @vi: the max number of virtual interfaces
6873 * @cmask: the channel access rights mask for the PF/VF
6874 * @pmask: the port access rights mask for the PF/VF
6875 * @nexact: the maximum number of exact MPS filters
6876 * @rcaps: read capabilities
6877 * @wxcaps: write/execute capabilities
6879 * Configures resource limits and capabilities for a physical or virtual
6882 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6883 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6884 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6885 unsigned int vi, unsigned int cmask, unsigned int pmask,
6886 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6888 struct fw_pfvf_cmd c;
6890 memset(&c, 0, sizeof(c));
6891 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6892 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6893 V_FW_PFVF_CMD_VFN(vf));
6894 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (rxqi) and interruptless (rxq). */
6895 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6896 V_FW_PFVF_CMD_NIQ(rxq));
/* Channel/port access masks plus total egress queue limit. */
6897 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6898 V_FW_PFVF_CMD_PMASK(pmask) |
6899 V_FW_PFVF_CMD_NEQ(txq));
/* Traffic class, VI count, and exact-match MPS filter limit. */
6900 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6901 V_FW_PFVF_CMD_NVI(vi) |
6902 V_FW_PFVF_CMD_NEXACTF(nexact));
/* Capability masks and Ethernet/control egress queue limit. */
6903 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6904 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6905 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6906 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6910 * t4_alloc_vi_func - allocate a virtual interface
6911 * @adap: the adapter
6912 * @mbox: mailbox to use for the FW command
6913 * @port: physical port associated with the VI
6914 * @pf: the PF owning the VI
6915 * @vf: the VF owning the VI
6916 * @nmac: number of MAC addresses needed (1 to 5)
6917 * @mac: the MAC addresses of the VI
6918 * @rss_size: size of RSS table slice associated with this VI
6919 * @portfunc: which Port Application Function MAC Address is desired
6920 * @idstype: Intrusion Detection Type
6922 * Allocates a virtual interface for the given physical port. If @mac is
6923 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6924 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6925 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6926 * stored consecutively so the space needed is @nmac * 6 bytes.
6927 * Returns a negative error number or the non-negative VI id.
6929 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6930 unsigned int port, unsigned int pf, unsigned int vf,
6931 unsigned int nmac, u8 *mac, u16 *rss_size,
6932 unsigned int portfunc, unsigned int idstype)
6937 memset(&c, 0, sizeof(c));
6938 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6939 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6940 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6941 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6942 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6943 V_FW_VI_CMD_FUNC(portfunc));
/* portid_pkd is a single byte; no endian conversion needed. */
6944 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
/* Ask FW not to attach an RSS slice (when @rss_size is NULL). */
6947 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6949 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out up to @nmac FW-assigned MAC addresses, 6 bytes apiece;
 * the fall-through order fills the highest-numbered slots first. */
6954 memcpy(mac, c.mac, sizeof(c.mac));
6957 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6959 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6961 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6963 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Report the RSS slice size and return the new VI id from the reply. */
6967 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6968 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6972 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6973 * @adap: the adapter
6974 * @mbox: mailbox to use for the FW command
6975 * @port: physical port associated with the VI
6976 * @pf: the PF owning the VI
6977 * @vf: the VF owning the VI
6978 * @nmac: number of MAC addresses needed (1 to 5)
6979 * @mac: the MAC addresses of the VI
6980 * @rss_size: size of RSS table slice associated with this VI
6982 * Backwards-compatible convenience routine to allocate a Virtual
6983 * Interface with an Ethernet Port Application Function and Intrusion
6984 * Detection System disabled.
/* Thin wrapper over t4_alloc_vi_func for the common Ethernet case. */
6986 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6987 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6990 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6995 * t4_free_vi - free a virtual interface
6996 * @adap: the adapter
6997 * @mbox: mailbox to use for the FW command
6998 * @pf: the PF owning the VI
6999 * @vf: the VF owning the VI
7000 * @viid: virtual interface identifier
7002 * Free a previously allocated virtual interface.
7004 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7005 unsigned int vf, unsigned int viid)
7009 memset(&c, 0, sizeof(c));
7010 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7013 V_FW_VI_CMD_PFN(pf) |
7014 V_FW_VI_CMD_VFN(vf));
/* FREE (rather than ALLOC) flag releases the VI identified below. */
7015 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7016 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7018 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7022 * t4_set_rxmode - set Rx properties of a virtual interface
7023 * @adap: the adapter
7024 * @mbox: mailbox to use for the FW command
7026 * @mtu: the new MTU or -1
7027 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7028 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7029 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7030 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7031 * @sleep_ok: if true we may sleep while awaiting command completion
7033 * Sets Rx properties of a virtual interface.
7035 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7036 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7039 struct fw_vi_rxmode_cmd c;
7041 /* convert to FW values */
/* -1 ("no change") maps to each field's all-ones mask value. */
7043 mtu = M_FW_VI_RXMODE_CMD_MTU;
7045 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7047 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7049 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7051 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7053 memset(&c, 0, sizeof(c));
7054 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7055 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7056 V_FW_VI_RXMODE_CMD_VIID(viid));
7057 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Pack all five Rx properties into a single 32-bit field. */
7059 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7060 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7061 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7062 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7063 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* sleep_ok decides between sleeping and busy-wait mailbox access. */
7064 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7068 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7069 * @adap: the adapter
7070 * @mbox: mailbox to use for the FW command
7072 * @free: if true any existing filters for this VI id are first removed
7073 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7074 * @addr: the MAC address(es)
7075 * @idx: where to store the index of each allocated filter
7076 * @hash: pointer to hash address filter bitmap
7077 * @sleep_ok: call is allowed to sleep
7079 * Allocates an exact-match filter for each of the supplied addresses and
7080 * sets it to the corresponding address. If @idx is not %NULL it should
7081 * have at least @naddr entries, each of which will be set to the index of
7082 * the filter allocated for the corresponding MAC address. If a filter
7083 * could not be allocated for an address its index is set to 0xffff.
7084 * If @hash is not %NULL addresses that fail to allocate an exact filter
7085 * are hashed and update the hash filter bitmap pointed at by @hash.
7087 * Returns a negative error number or the number of filters allocated.
7089 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7090 unsigned int viid, bool free, unsigned int naddr,
7091 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7093 int offset, ret = 0;
7094 struct fw_vi_mac_cmd c;
7095 unsigned int nfilters = 0;
/* Hardware limit on exact-match entries for this chip. */
7096 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7097 unsigned int rem = naddr;
7099 if (naddr > max_naddr)
/* Process the address list in command-sized batches. */
7102 for (offset = 0; offset < naddr ; /**/) {
7103 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7105 : ARRAY_SIZE(c.u.exact));
/* Command length covers only the exact[] entries actually used. */
7106 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7107 u.exact[fw_naddr]), 16);
7108 struct fw_vi_mac_exact *p;
7111 memset(&c, 0, sizeof(c));
7112 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7115 V_FW_CMD_EXEC(free) |
7116 V_FW_VI_MAC_CMD_VIID(viid));
7117 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7118 V_FW_CMD_LEN16(len16));
/* FW_VI_MAC_ADD_MAC asks the firmware to pick a free slot. */
7120 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7122 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7123 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7124 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7128 * It's okay if we run out of space in our MAC address arena.
7129 * Some of the addresses we submit may get stored so we need
7130 * to run through the reply to see what the results were ...
7132 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7133 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: record each granted filter index; addresses that
 * did not get an exact filter fall back into the hash bitmap. */
7136 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7137 u16 index = G_FW_VI_MAC_CMD_IDX(
7138 be16_to_cpu(p->valid_to_idx));
7141 idx[offset+i] = (index >= max_naddr
7144 if (index < max_naddr)
7147 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* A partial allocation (-FW_ENOMEM) still reports the filter count. */
7155 if (ret == 0 || ret == -FW_ENOMEM)
7161 * t4_change_mac - modifies the exact-match filter for a MAC address
7162 * @adap: the adapter
7163 * @mbox: mailbox to use for the FW command
7165 * @idx: index of existing filter for old value of MAC address, or -1
7166 * @addr: the new MAC address value
7167 * @persist: whether a new MAC allocation should be persistent
7168 * @add_smt: if true also add the address to the HW SMT
7170 * Modifies an exact-match filter and sets it to the new MAC address if
7171 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7172 * latter case the address is added persistently if @persist is %true.
7174 * Note that in general it is not possible to modify the value of a given
7175 * filter so the generic way to modify an address filter is to free the one
7176 * being used by the old address value and allocate a new filter for the
7177 * new address value.
7179 * Returns a negative error number or the index of the filter with the new
7180 * MAC value. Note that this index may differ from @idx.
7182 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7183 int idx, const u8 *addr, bool persist, bool add_smt)
7186 struct fw_vi_mac_cmd c;
7187 struct fw_vi_mac_exact *p = c.u.exact;
7188 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7190 if (idx < 0) /* new allocation */
7191 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
/* Choose whether the entry also goes into the source MAC table (SMT). */
7192 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7194 memset(&c, 0, sizeof(c));
7195 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7196 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7197 V_FW_VI_MAC_CMD_VIID(viid));
/* One exact-match entry => command length of 1 (16-byte units). */
7198 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7199 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7200 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7201 V_FW_VI_MAC_CMD_IDX(idx));
7202 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7204 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Return the filter index FW actually used (may differ from @idx). */
7206 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7207 if (ret >= max_mac_addr)
7214 * t4_set_addr_hash - program the MAC inexact-match hash filter
7215 * @adap: the adapter
7216 * @mbox: mailbox to use for the FW command
7218 * @ucast: whether the hash filter should also match unicast addresses
7219 * @vec: the value to be written to the hash filter
7220 * @sleep_ok: call is allowed to sleep
7222 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7224 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7225 bool ucast, u64 vec, bool sleep_ok)
7227 struct fw_vi_mac_cmd c;
7230 memset(&c, 0, sizeof(c));
7231 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7232 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7233 V_FW_VI_ENABLE_CMD_VIID(viid));
/* HASHVEC entry type selects the hash-filter variant of the command. */
7234 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7235 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7236 c.freemacs_to_len16 = cpu_to_be32(val);
/* The full 64-bit hash vector is written in one shot. */
7237 c.u.hash.hashvec = cpu_to_be64(vec);
7238 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7242 * t4_enable_vi_params - enable/disable a virtual interface
7243 * @adap: the adapter
7244 * @mbox: mailbox to use for the FW command
7246 * @rx_en: 1=enable Rx, 0=disable Rx
7247 * @tx_en: 1=enable Tx, 0=disable Tx
7248 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7250 * Enables/disables a virtual interface. Note that setting DCB Enable
7251 * only makes sense when enabling a Virtual Interface ...
7253 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7254 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7256 struct fw_vi_enable_cmd c;
7258 memset(&c, 0, sizeof(c));
7259 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7260 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7261 V_FW_VI_ENABLE_CMD_VIID(viid));
7262 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7263 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7264 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7266 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7270 * t4_enable_vi - enable/disable a virtual interface
7271 * @adap: the adapter
7272 * @mbox: mailbox to use for the FW command
7274 * @rx_en: 1=enable Rx, 0=disable Rx
7275 * @tx_en: 1=enable Tx, 0=disable Tx
7277 * Enables/disables a virtual interface. Note that setting DCB Enable
7278 * only makes sense when enabling a Virtual Interface ...
7280 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7281 bool rx_en, bool tx_en)
7283 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7287 * t4_identify_port - identify a VI's port by blinking its LED
7288 * @adap: the adapter
7289 * @mbox: mailbox to use for the FW command
7291 * @nblinks: how many times to blink LED at 2.5 Hz
7293 * Identifies a VI's port by blinking its LED.
7295 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7296 unsigned int nblinks)
7298 struct fw_vi_enable_cmd c;
7300 memset(&c, 0, sizeof(c));
7301 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7302 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7303 V_FW_VI_ENABLE_CMD_VIID(viid));
7304 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7305 c.blinkdur = cpu_to_be16(nblinks);
7306 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7310 * t4_iq_stop - stop an ingress queue and its FLs
7311 * @adap: the adapter
7312 * @mbox: mailbox to use for the FW command
7313 * @pf: the PF owning the queues
7314 * @vf: the VF owning the queues
7315 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7316 * @iqid: ingress queue id
7317 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7318 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7320 * Stops an ingress queue and its associated FLs, if any. This causes
7321 * any current or future data/messages destined for these queues to be
7324 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7325 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7326 unsigned int fl0id, unsigned int fl1id)
7330 memset(&c, 0, sizeof(c));
7331 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7332 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7333 V_FW_IQ_CMD_VFN(vf));
7334 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7335 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7336 c.iqid = cpu_to_be16(iqid);
7337 c.fl0id = cpu_to_be16(fl0id);
7338 c.fl1id = cpu_to_be16(fl1id);
7339 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7343 * t4_iq_free - free an ingress queue and its FLs
7344 * @adap: the adapter
7345 * @mbox: mailbox to use for the FW command
7346 * @pf: the PF owning the queues
7347 * @vf: the VF owning the queues
7348 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7349 * @iqid: ingress queue id
7350 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7351 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7353 * Frees an ingress queue and its associated FLs, if any.
7355 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7356 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7357 unsigned int fl0id, unsigned int fl1id)
7361 memset(&c, 0, sizeof(c));
7362 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7363 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7364 V_FW_IQ_CMD_VFN(vf));
7365 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7366 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7367 c.iqid = cpu_to_be16(iqid);
7368 c.fl0id = cpu_to_be16(fl0id);
7369 c.fl1id = cpu_to_be16(fl1id);
7370 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7374 * t4_eth_eq_free - free an Ethernet egress queue
7375 * @adap: the adapter
7376 * @mbox: mailbox to use for the FW command
7377 * @pf: the PF owning the queue
7378 * @vf: the VF owning the queue
7379 * @eqid: egress queue id
7381 * Frees an Ethernet egress queue.
7383 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7384 unsigned int vf, unsigned int eqid)
7386 struct fw_eq_eth_cmd c;
7388 memset(&c, 0, sizeof(c));
7389 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7390 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7391 V_FW_EQ_ETH_CMD_PFN(pf) |
7392 V_FW_EQ_ETH_CMD_VFN(vf));
7393 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7394 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7395 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7399 * t4_ctrl_eq_free - free a control egress queue
7400 * @adap: the adapter
7401 * @mbox: mailbox to use for the FW command
7402 * @pf: the PF owning the queue
7403 * @vf: the VF owning the queue
7404 * @eqid: egress queue id
7406 * Frees a control egress queue.
7408 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7409 unsigned int vf, unsigned int eqid)
7411 struct fw_eq_ctrl_cmd c;
7413 memset(&c, 0, sizeof(c));
7414 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7415 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7416 V_FW_EQ_CTRL_CMD_PFN(pf) |
7417 V_FW_EQ_CTRL_CMD_VFN(vf));
7418 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7419 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7420 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7424 * t4_ofld_eq_free - free an offload egress queue
7425 * @adap: the adapter
7426 * @mbox: mailbox to use for the FW command
7427 * @pf: the PF owning the queue
7428 * @vf: the VF owning the queue
7429 * @eqid: egress queue id
7431 * Frees a control egress queue.
7433 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7434 unsigned int vf, unsigned int eqid)
7436 struct fw_eq_ofld_cmd c;
7438 memset(&c, 0, sizeof(c));
7439 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7440 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7441 V_FW_EQ_OFLD_CMD_PFN(pf) |
7442 V_FW_EQ_OFLD_CMD_VFN(vf));
7443 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7444 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7445 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 *	NOTE(review): array entries 0, 1, 3 and 7 reconstructed from the
 *	upstream driver; the visible entries (2, 4, 5, 6) match upstream.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the reason code reported by the firmware. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= sizeof(reason) / sizeof(reason[0]))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
7474 * t4_handle_fw_rpl - process a FW reply message
7475 * @adap: the adapter
7476 * @rpl: start of the FW message
7478 * Processes a FW message, such as link state change messages.
7480 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7482 u8 opcode = *(const u8 *)rpl;
7483 const struct fw_port_cmd *p = (const void *)rpl;
7484 unsigned int action =
7485 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7487 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7488 /* link/module state change message */
7489 int speed = 0, fc = 0, i;
7490 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7491 struct port_info *pi = NULL;
7492 struct link_config *lc;
7493 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7494 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7495 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
7497 if (stat & F_FW_PORT_CMD_RXPAUSE)
7499 if (stat & F_FW_PORT_CMD_TXPAUSE)
7501 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7503 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7505 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7507 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7509 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7511 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
7514 for_each_port(adap, i) {
7515 pi = adap2pinfo(adap, i);
7516 if (pi->tx_chan == chan)
7521 if (mod != pi->mod_type) {
7523 t4_os_portmod_changed(adap, i);
7525 if (link_ok != lc->link_ok || speed != lc->speed ||
7526 fc != lc->fc) { /* something changed */
7527 if (!link_ok && lc->link_ok)
7528 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
7529 lc->link_ok = link_ok;
7532 lc->supported = be16_to_cpu(p->u.info.pcap);
7533 lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
7534 t4_os_link_changed(adap, i, link_ok);
7537 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7544 * get_pci_mode - determine a card's PCI mode
7545 * @adapter: the adapter
7546 * @p: where to store the PCI settings
7548 * Determines a card's PCI mode and associated parameters, such as speed
7551 static void get_pci_mode(struct adapter *adapter,
7552 struct pci_params *p)
7557 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7559 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7560 p->speed = val & PCI_EXP_LNKSTA_CLS;
7561 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7566 * init_link_config - initialize a link's SW state
7567 * @lc: structure holding the link state
7568 * @pcaps: supported link capabilities
7569 * @acaps: advertised link capabilities
7571 * Initializes the SW state maintained for each link, including the link's
7572 * capabilities and default speed/flow-control/autonegotiation settings.
7574 static void init_link_config(struct link_config *lc, unsigned int pcaps,
7579 lc->supported = pcaps;
7580 lc->lp_advertising = 0;
7581 lc->requested_speed = 0;
7583 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7585 lc->link_down_rc = 255;
7588 if (acaps & FW_PORT_CAP_FEC_RS)
7590 if (acaps & FW_PORT_CAP_FEC_BASER_RS)
7591 fec |= FEC_BASER_RS;
7592 if (acaps & FW_PORT_CAP_FEC_RESERVED)
7593 fec |= FEC_RESERVED;
7594 lc->requested_fec = lc->fec = fec;
7596 if (lc->supported & FW_PORT_CAP_ANEG) {
7597 lc->advertising = lc->supported & ADVERT_MASK;
7598 lc->autoneg = AUTONEG_ENABLE;
7599 lc->requested_fc |= PAUSE_AUTONEG;
7601 lc->advertising = 0;
7602 lc->autoneg = AUTONEG_DISABLE;
7607 u32 vendor_and_model_id;
7611 int t4_get_flash_params(struct adapter *adapter)
7614 * Table for non-Numonix supported flash parts. Numonix parts are left
7615 * to the preexisting well-tested code. All flash parts have 64KB
7618 static struct flash_desc supported_flash[] = {
7619 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7625 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7627 ret = sf1_read(adapter, 3, 0, 1, &info);
7628 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7632 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7633 if (supported_flash[ret].vendor_and_model_id == info) {
7634 adapter->params.sf_size = supported_flash[ret].size_mb;
7635 adapter->params.sf_nsec =
7636 adapter->params.sf_size / SF_SEC_SIZE;
7640 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7642 info >>= 16; /* log2 of size */
7643 if (info >= 0x14 && info < 0x18)
7644 adapter->params.sf_nsec = 1 << (info - 16);
7645 else if (info == 0x18)
7646 adapter->params.sf_nsec = 64;
7649 adapter->params.sf_size = 1 << info;
7652 * We should ~probably~ reject adapters with FLASHes which are too
7653 * small but we have some legacy FPGAs with small FLASHes that we'd
7654 * still like to use. So instead we emit a scary message ...
7656 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7657 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7658 adapter->params.sf_size, FLASH_MIN_SIZE);
7663 static void set_pcie_completion_timeout(struct adapter *adapter,
7669 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7671 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7674 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7678 const struct chip_params *t4_get_chip_params(int chipid)
7680 static const struct chip_params chip_params[] = {
7684 .pm_stats_cnt = PM_NSTATS,
7685 .cng_ch_bits_log = 2,
7687 .cim_num_obq = CIM_NUM_OBQ,
7688 .mps_rplc_size = 128,
7690 .sge_fl_db = F_DBPRIO,
7691 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7696 .pm_stats_cnt = PM_NSTATS,
7697 .cng_ch_bits_log = 2,
7699 .cim_num_obq = CIM_NUM_OBQ_T5,
7700 .mps_rplc_size = 128,
7702 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7703 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7708 .pm_stats_cnt = T6_PM_NSTATS,
7709 .cng_ch_bits_log = 3,
7711 .cim_num_obq = CIM_NUM_OBQ_T5,
7712 .mps_rplc_size = 256,
7715 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7719 chipid -= CHELSIO_T4;
7720 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7723 return &chip_params[chipid];
7727 * t4_prep_adapter - prepare SW and HW for operation
7728 * @adapter: the adapter
7729 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7731 * Initialize adapter SW state for the various HW modules, set initial
7732 * values for some adapter tunables, take PHYs out of reset, and
7733 * initialize the MDIO interface.
7735 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7741 get_pci_mode(adapter, &adapter->params.pci);
7743 pl_rev = t4_read_reg(adapter, A_PL_REV);
7744 adapter->params.chipid = G_CHIPID(pl_rev);
7745 adapter->params.rev = G_REV(pl_rev);
7746 if (adapter->params.chipid == 0) {
7747 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7748 adapter->params.chipid = CHELSIO_T4;
7750 /* T4A1 chip is not supported */
7751 if (adapter->params.rev == 1) {
7752 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7757 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
7758 if (adapter->chip_params == NULL)
7761 adapter->params.pci.vpd_cap_addr =
7762 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7764 ret = t4_get_flash_params(adapter);
7768 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7772 /* Cards with real ASICs have the chipid in the PCIe device id */
7773 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7774 if (device_id >> 12 == chip_id(adapter))
7775 adapter->params.cim_la_size = CIMLA_SIZE;
7778 adapter->params.fpga = 1;
7779 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7782 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7785 * Default port and clock for debugging in case we can't reach FW.
7787 adapter->params.nports = 1;
7788 adapter->params.portvec = 1;
7789 adapter->params.vpd.cclk = 50000;
7791 /* Set pci completion timeout value to 4 seconds. */
7792 set_pcie_completion_timeout(adapter, 0xd);
7797 * t4_shutdown_adapter - shut down adapter, host & wire
7798 * @adapter: the adapter
7800 * Perform an emergency shutdown of the adapter and stop it from
7801 * continuing any further communication on the ports or DMA to the
7802 * host. This is typically used when the adapter and/or firmware
7803 * have crashed and we want to prevent any further accidental
7804 * communication with the rest of the world. This will also force
7805 * the port Link Status to go down -- if register writes work --
7806 * which should help our peers figure out that we're down.
7808 int t4_shutdown_adapter(struct adapter *adapter)
7812 t4_intr_disable(adapter);
7813 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
7814 for_each_port(adapter, port) {
7815 u32 a_port_cfg = PORT_REG(port,
7820 t4_write_reg(adapter, a_port_cfg,
7821 t4_read_reg(adapter, a_port_cfg)
7822 & ~V_SIGNAL_DET(1));
7824 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7830 * t4_init_devlog_params - initialize adapter->params.devlog
7831 * @adap: the adapter
7832 * @fw_attach: whether we can talk to the firmware
7834 * Initialize various fields of the adapter's Firmware Device Log
7835 * Parameters structure.
7837 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7839 struct devlog_params *dparams = &adap->params.devlog;
7841 unsigned int devlog_meminfo;
7842 struct fw_devlog_cmd devlog_cmd;
7845 /* If we're dealing with newer firmware, the Device Log Paramerters
7846 * are stored in a designated register which allows us to access the
7847 * Device Log even if we can't talk to the firmware.
7850 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7852 unsigned int nentries, nentries128;
7854 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
7855 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
7857 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7858 nentries = (nentries128 + 1) * 128;
7859 dparams->size = nentries * sizeof(struct fw_devlog_e);
7865 * For any failing returns ...
7867 memset(dparams, 0, sizeof *dparams);
7870 * If we can't talk to the firmware, there's really nothing we can do
7876 /* Otherwise, ask the firmware for it's Device Log Parameters.
7878 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7879 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7880 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7881 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7882 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7888 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7889 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7890 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7891 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7897 * t4_init_sge_params - initialize adap->params.sge
7898 * @adapter: the adapter
7900 * Initialize various fields of the adapter's SGE Parameters structure.
7902 int t4_init_sge_params(struct adapter *adapter)
7905 struct sge_params *sp = &adapter->params.sge;
7908 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7909 sp->counter_val[0] = G_THRESHOLD_0(r);
7910 sp->counter_val[1] = G_THRESHOLD_1(r);
7911 sp->counter_val[2] = G_THRESHOLD_2(r);
7912 sp->counter_val[3] = G_THRESHOLD_3(r);
7914 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7915 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7916 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7917 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7918 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7919 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7920 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7921 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7922 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
7924 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7925 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7927 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7928 else if (is_t5(adapter))
7929 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7931 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
7933 /* egress queues: log2 of # of doorbells per BAR2 page */
7934 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7935 r >>= S_QUEUESPERPAGEPF0 +
7936 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7937 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7939 /* ingress queues: log2 of # of doorbells per BAR2 page */
7940 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7941 r >>= S_QUEUESPERPAGEPF0 +
7942 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7943 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
7945 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7946 r >>= S_HOSTPAGESIZEPF0 +
7947 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7948 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7950 r = t4_read_reg(adapter, A_SGE_CONTROL);
7951 sp->sge_control = r;
7952 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7953 sp->fl_pktshift = G_PKTSHIFT(r);
7954 if (chip_id(adapter) <= CHELSIO_T5) {
7955 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
7956 X_INGPADBOUNDARY_SHIFT);
7958 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
7959 X_T6_INGPADBOUNDARY_SHIFT);
7962 sp->pack_boundary = sp->pad_boundary;
7964 r = t4_read_reg(adapter, A_SGE_CONTROL2);
7965 if (G_INGPACKBOUNDARY(r) == 0)
7966 sp->pack_boundary = 16;
7968 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
7970 for (i = 0; i < SGE_FLBUF_SIZES; i++)
7971 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
7972 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
7978 * Read and cache the adapter's compressed filter mode and ingress config.
7980 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7982 struct tp_params *tpp = &adap->params.tp;
7984 if (t4_use_ldst(adap)) {
7985 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7986 A_TP_VLAN_PRI_MAP, 1);
7987 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7988 A_TP_INGRESS_CONFIG, 1);
7990 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7991 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7992 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7993 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7997 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7998 * shift positions of several elements of the Compressed Filter Tuple
7999 * for this adapter which we need frequently ...
8001 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
8002 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
8003 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
8004 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
8005 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
8006 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
8007 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
8008 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
8009 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
8010 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
8013 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8014 * represents the presense of an Outer VLAN instead of a VNIC ID.
8016 if ((tpp->ingress_config & F_VNIC) == 0)
8017 tpp->vnic_shift = -1;
8021 * t4_init_tp_params - initialize adap->params.tp
8022 * @adap: the adapter
8024 * Initialize various fields of the adapter's TP Parameters structure.
8026 int t4_init_tp_params(struct adapter *adap)
8030 struct tp_params *tpp = &adap->params.tp;
8032 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
8033 tpp->tre = G_TIMERRESOLUTION(v);
8034 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
8036 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8037 for (chan = 0; chan < MAX_NCHAN; chan++)
8038 tpp->tx_modq[chan] = chan;
8040 read_filter_mode_and_ingress_config(adap);
8043 * Cache a mask of the bits that represent the error vector portion of
8044 * rx_pkt.err_vec. T6+ can use a compressed error vector to make room
8045 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
8047 tpp->err_vec_mask = htobe16(0xffff);
8048 if (chip_id(adap) > CHELSIO_T5) {
8049 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
8050 if (v & F_CRXPKTENC) {
8052 htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
8060 * t4_filter_field_shift - calculate filter field shift
8061 * @adap: the adapter
8062 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8064 * Return the shift position of a filter field within the Compressed
8065 * Filter Tuple. The filter field is specified via its selection bit
8066 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
8068 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8070 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8074 if ((filter_mode & filter_sel) == 0)
8077 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8078 switch (filter_mode & sel) {
8080 field_shift += W_FT_FCOE;
8083 field_shift += W_FT_PORT;
8086 field_shift += W_FT_VNIC_ID;
8089 field_shift += W_FT_VLAN;
8092 field_shift += W_FT_TOS;
8095 field_shift += W_FT_PROTOCOL;
8098 field_shift += W_FT_ETHERTYPE;
8101 field_shift += W_FT_MACMATCH;
8104 field_shift += W_FT_MPSHITTYPE;
8106 case F_FRAGMENTATION:
8107 field_shift += W_FT_FRAGMENTATION;
8114 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
8118 struct fw_port_cmd c;
8120 struct port_info *p = adap2pinfo(adap, port_id);
8123 memset(&c, 0, sizeof(c));
8125 for (i = 0, j = -1; i <= p->port_id; i++) {
8128 } while ((adap->params.portvec & (1 << j)) == 0);
8131 if (!(adap->flags & IS_VF) ||
8132 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
8133 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
8134 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8135 V_FW_PORT_CMD_PORTID(j));
8136 c.action_to_len16 = htonl(
8137 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8139 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8143 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
8144 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
8145 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
8146 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
8147 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
8149 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
8150 be16_to_cpu(c.u.info.acap));
8153 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
8157 p->vi[0].viid = ret;
8158 if (chip_id(adap) <= CHELSIO_T5)
8159 p->vi[0].smt_idx = (ret & 0x7f) << 1;
8161 p->vi[0].smt_idx = (ret & 0x7f);
8163 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
8165 p->vi[0].rss_size = rss_size;
8166 t4_os_set_hw_addr(adap, p->port_id, addr);
8168 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8169 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
8170 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
8171 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
8173 p->vi[0].rss_base = 0xffff;
8175 /* MPASS((val >> 16) == rss_size); */
8176 p->vi[0].rss_base = val & 0xffff;
8183 * t4_read_cimq_cfg - read CIM queue configuration
8184 * @adap: the adapter
8185 * @base: holds the queue base addresses in bytes
8186 * @size: holds the queue sizes in bytes
8187 * @thres: holds the queue full thresholds in bytes
8189 * Returns the current configuration of the CIM queues, starting with
8190 * the IBQs, then the OBQs.
8192 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8195 int cim_num_obq = adap->chip_params->cim_num_obq;
8197 for (i = 0; i < CIM_NUM_IBQ; i++) {
8198 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8200 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8201 /* value is in 256-byte units */
8202 *base++ = G_CIMQBASE(v) * 256;
8203 *size++ = G_CIMQSIZE(v) * 256;
8204 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
8206 for (i = 0; i < cim_num_obq; i++) {
8207 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8209 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8210 /* value is in 256-byte units */
8211 *base++ = G_CIMQBASE(v) * 256;
8212 *size++ = G_CIMQSIZE(v) * 256;
8217 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8218 * @adap: the adapter
8219 * @qid: the queue index
8220 * @data: where to store the queue contents
8221 * @n: capacity of @data in 32-bit words
8223 * Reads the contents of the selected CIM queue starting at address 0 up
8224 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8225 * error and the number of 32-bit words actually read on success.
8227 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8229 int i, err, attempts;
8231 const unsigned int nwords = CIM_IBQ_SIZE * 4;
8233 if (qid > 5 || (n & 3))
8236 addr = qid * nwords;
8240 /* It might take 3-10ms before the IBQ debug read access is allowed.
8241 * Wait for 1 Sec with a delay of 1 usec.
8245 for (i = 0; i < n; i++, addr++) {
8246 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8248 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8252 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
8254 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8259 * t4_read_cim_obq - read the contents of a CIM outbound queue
8260 * @adap: the adapter
8261 * @qid: the queue index
8262 * @data: where to store the queue contents
8263 * @n: capacity of @data in 32-bit words
8265 * Reads the contents of the selected CIM queue starting at address 0 up
8266 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8267 * error and the number of 32-bit words actually read on success.
8269 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8272 unsigned int addr, v, nwords;
8273 int cim_num_obq = adap->chip_params->cim_num_obq;
8275 if ((qid > (cim_num_obq - 1)) || (n & 3))
8278 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8279 V_QUENUMSELECT(qid));
8280 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8282 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
8283 nwords = G_CIMQSIZE(v) * 64; /* same */
8287 for (i = 0; i < n; i++, addr++) {
8288 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8290 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8294 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
8296 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* Offsets of the control and PBT regions within the CIM address space. */
enum {
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
8309 * t4_cim_read - read a block from CIM internal address space
8310 * @adap: the adapter
8311 * @addr: the start address within the CIM address space
8312 * @n: number of words to read
8313 * @valp: where to store the result
8315 * Reads a block of 4-byte words from the CIM intenal address space.
8317 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8322 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8325 for ( ; !ret && n--; addr += 4) {
8326 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8327 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8330 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8336 * t4_cim_write - write a block into CIM internal address space
8337 * @adap: the adapter
8338 * @addr: the start address within the CIM address space
8339 * @n: number of words to write
8340 * @valp: set of values to write
8342 * Writes a block of 4-byte words into the CIM intenal address space.
8344 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8345 const unsigned int *valp)
8349 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8352 for ( ; !ret && n--; addr += 4) {
8353 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8354 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8355 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Write a single 4-byte word into the CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
8368 * t4_cim_ctl_read - read a block from CIM control region
8369 * @adap: the adapter
8370 * @addr: the start address within the CIM control region
8371 * @n: number of words to read
8372 * @valp: where to store the result
8374 * Reads a block of 4-byte words from the CIM control region.
8376 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8379 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8383 * t4_cim_read_la - read CIM LA capture buffer
8384 * @adap: the adapter
8385 * @la_buf: where to store the LA data
8386 * @wrptr: the HW write pointer within the capture buffer
8388 * Reads the contents of the CIM LA buffer with the most recent entry at
8389 * the end of the returned data and with the entry at @wrptr first.
8390 * We try to leave the LA in the running state we find it in.
8392 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8395 unsigned int cfg, val, idx;
8397 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8401 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8402 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8407 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Start reading from the HW write pointer so the oldest entry comes first. */
8411 idx = G_UPDBGLAWRPTR(val);
8415 for (i = 0; i < adap->params.cim_la_size; i++) {
/* Program the read pointer and set UPDBGLARDEN to request one entry. */
8416 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8417 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8420 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* UPDBGLARDEN still set means the read hasn't completed yet.
 * NOTE(review): the retry/abort logic here is elided from this listing. */
8423 if (val & F_UPDBGLARDEN) {
8427 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8431 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8432 idx = (idx + 1) & M_UPDBGLARDPTR;
8434 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8435 * identify the 32-bit portion of the full 312-bit data
/* NOTE(review): this skip loop uses '%' while line 8432 uses '&' for the
 * same wrap; upstream Linux uses '& M_UPDBGLARDPTR' in both places —
 * confirm whether '%' here is intentional. */
8438 while ((idx & 0xf) > 9)
8439 idx = (idx + 1) % M_UPDBGLARDPTR;
/* Restore the LA to the running state we found it in (re-enable, clear
 * the read-enable bit). */
8442 if (cfg & F_UPDBGLAEN) {
8443 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8444 cfg & ~F_UPDBGLARDEN);
8452 * t4_tp_read_la - read TP LA capture buffer
8453 * @adap: the adapter
8454 * @la_buf: where to store the LA data
8455 * @wrptr: the HW write pointer within the capture buffer
8457 * Reads the contents of the TP LA buffer with the most recent entry at
8458 * the end of the returned data and with the entry at @wrptr first.
8459 * We leave the LA in the running state we find it in.
8461 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8463 bool last_incomplete;
8464 unsigned int i, cfg, val, idx;
8466 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8467 if (cfg & F_DBGLAENABLE) /* freeze LA */
/* XOR clears DBGLAENABLE (it is known set here) while keeping the rest
 * of cfg; the TP LA mask bits are OR'd back in on every config write. */
8468 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8469 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8471 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8472 idx = G_DBGLAWPTR(val);
/* In modes >= 2 each entry is written in two halves; if the second half
 * (DBGLAWHLF) hasn't landed, the entry at the write pointer is partial. */
8473 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8474 if (last_incomplete)
8475 idx = (idx + 1) & M_DBGLARPTR;
/* Reuse val as the base config for the per-entry reads: clear the read
 * pointer field and keep the LA mask. */
8480 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8481 val |= adap->params.tp.la_mask;
8483 for (i = 0; i < TPLA_SIZE; i++) {
8484 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8485 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8486 idx = (idx + 1) & M_DBGLARPTR;
8489 /* Wipe out last entry if it isn't valid */
8490 if (last_incomplete)
8491 la_buf[TPLA_SIZE - 1] = ~0ULL;
8493 if (cfg & F_DBGLAENABLE) /* restore running state */
8494 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8495 cfg | adap->params.tp.la_mask);
8499 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8500 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8501 * state for more than the Warning Threshold then we'll issue a warning about
8502 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8503 * appears to be hung every Warning Repeat second till the situation clears.
8504 * If the situation clears, we'll note that as well.
8506 #define SGE_IDMA_WARN_THRESH 1
8507 #define SGE_IDMA_WARN_REPEAT 300
8510 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8511 * @adapter: the adapter
8512 * @idma: the adapter IDMA Monitor state
8514 * Initialize the state of an SGE Ingress DMA Monitor.
8516 void t4_idma_monitor_init(struct adapter *adapter,
8517 struct sge_idma_monitor_state *idma)
8519 /* Initialize the state variables for detecting an SGE Ingress DMA
8520 * hang. The SGE has internal counters which count up on each clock
8521 * tick whenever the SGE finds its Ingress DMA State Engines in the
8522 * same state they were on the previous clock tick. The clock used is
8523 * the Core Clock so we have a limit on the maximum "time" they can
8524 * record; typically a very small number of seconds. For instance,
8525 * with a 600MHz Core Clock, we can only count up to a bit more than
8526 * 7s. So we'll synthesize a larger counter in order to not run the
8527 * risk of having the "timers" overflow and give us the flexibility to
8528 * maintain a Hung SGE State Machine of our own which operates across
8529 * a longer time frame.
8531 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8532 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8536 * t4_idma_monitor - monitor SGE Ingress DMA state
8537 * @adapter: the adapter
8538 * @idma: the adapter IDMA Monitor state
8539 * @hz: number of ticks/second
8540 * @ticks: number of ticks since the last IDMA Monitor call
8542 void t4_idma_monitor(struct adapter *adapter,
8543 struct sge_idma_monitor_state *idma,
8546 int i, idma_same_state_cnt[2];
8548 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8549 * are counters inside the SGE which count up on each clock when the
8550 * SGE finds its Ingress DMA State Engines in the same states they
8551 * were in the previous clock. The counters will peg out at
8552 * 0xffffffff without wrapping around so once they pass the 1s
8553 * threshold they'll stay above that till the IDMA state changes.
8555 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8556 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8557 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* One pass per IDMA channel (there are two). */
8559 for (i = 0; i < 2; i++) {
8560 u32 debug0, debug11;
8562 /* If the Ingress DMA Same State Counter ("timer") is less
8563 * than 1s, then we can reset our synthesized Stall Timer and
8564 * continue. If we have previously emitted warnings about a
8565 * potential stalled Ingress Queue, issue a note indicating
8566 * that the Ingress Queue has resumed forward progress.
8568 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8569 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8570 CH_WARN(adapter, "SGE idma%d, queue %u, "
8571 "resumed after %d seconds\n",
8572 i, idma->idma_qid[i],
8573 idma->idma_stalled[i]/hz);
8574 idma->idma_stalled[i] = 0;
8578 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8579 * domain. The first time we get here it'll be because we
8580 * passed the 1s Threshold; each additional time it'll be
8581 * because the RX Timer Callback is being fired on its regular
8584 * If the stall is below our Potential Hung Ingress Queue
8585 * Warning Threshold, continue.
8587 if (idma->idma_stalled[i] == 0) {
8588 idma->idma_stalled[i] = hz;
8589 idma->idma_warn[i] = 0;
8591 idma->idma_stalled[i] += ticks;
8592 idma->idma_warn[i] -= ticks;
8595 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8598 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8600 if (idma->idma_warn[i] > 0)
8602 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8604 /* Read and save the SGE IDMA State and Queue ID information.
8605 * We do this every time in case it changes across time ...
8606 * can't be too careful ...
8608 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8609 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* debug0 packs both channels' 6-bit states at 9-bit stride. */
8610 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8612 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8613 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
/* debug11 packs both channels' 16-bit queue IDs. */
8614 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8616 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8617 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8618 i, idma->idma_qid[i], idma->idma_state[i],
8619 idma->idma_stalled[i]/hz,
8621 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8626 * t4_read_pace_tbl - read the pace table
8627 * @adap: the adapter
8628 * @pace_vals: holds the returned values
8630 * Returns the values of TP's pace table in microseconds.
8632 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8636 for (i = 0; i < NTX_SCHED; i++) {
/* Writing 0xffff0000 + index selects a read-back of entry i; the raw
 * value is in DACK ticks and is converted to microseconds. */
8637 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8638 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8639 pace_vals[i] = dack_ticks_to_usec(adap, v);
8644 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8645 * @adap: the adapter
8646 * @sched: the scheduler index
8647 * @kbps: the byte rate in Kbps
8648 * @ipg: the interpacket delay in tenths of nanoseconds
8650 * Return the current configuration of a HW Tx scheduler.
8652 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8655 unsigned int v, addr, bpt, cpt;
/* Rate-limit registers are packed two schedulers per word, hence the
 * "- sched / 2" addressing; which half is used depends on sched parity.
 * NOTE(review): the odd/even selection code is elided from this listing. */
8658 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8659 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8660 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
/* bpt = bytes per tick, cpt = core clocks per tick (cpt extraction is
 * elided here). */
8663 bpt = (v >> 8) & 0xff;
8666 *kbps = 0; /* scheduler disabled */
8668 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
/* bytes/s -> Kbps: * 8 bits / 1000 = / 125 */
8669 *kbps = (v * bpt) / 125;
8673 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8674 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8675 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
/* Convert core ticks to tenths of nanoseconds. */
8679 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8684 * t4_load_cfg - download config file
8685 * @adap: the adapter
8686 * @cfg_data: the cfg text file to write
8687 * @size: text file size
8689 * Write the supplied config text file to the card's serial flash.
8691 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8693 int ret, i, n, cfg_addr;
8695 unsigned int flash_cfg_start_sec;
8696 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/* Locate the FLASH region reserved for the Firmware Configuration File;
 * t4_flash_cfg_addr() returns a negative error if the FLASH is too small. */
8698 cfg_addr = t4_flash_cfg_addr(adap);
8703 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8705 if (size > FLASH_CFG_MAX_SIZE) {
8706 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8707 FLASH_CFG_MAX_SIZE);
/* Always erase the full config region before (optionally) writing. */
8711 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8713 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8714 flash_cfg_start_sec + i - 1);
8716 * If size == 0 then we're simply erasing the FLASH sectors associated
8717 * with the on-adapter Firmware Configuration File.
8719 if (ret || size == 0)
8722 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8723 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
/* Last page may be partial: n = remaining bytes, else a full page
 * (the n assignments are elided from this listing). */
8724 if ( (size - i) < SF_PAGE_SIZE)
8728 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8732 addr += SF_PAGE_SIZE;
8733 cfg_data += SF_PAGE_SIZE;
8738 CH_ERR(adap, "config file %s failed %d\n",
8739 (size == 0 ? "clear" : "download"), ret);
8744 * t5_fw_init_extern_mem - initialize the external memory
8745 * @adap: the adapter
8747 * Initializes the external memory on T5.
8749 int t5_fw_init_extern_mem(struct adapter *adap)
8751 u32 params[1], val[1];
8757 val[0] = 0xff; /* Initialize all MCs */
/* Ask the firmware (DEV/MCINIT parameter) to initialize the memory
 * controllers; this can take a while, hence the max FW timeout. */
8758 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8759 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8760 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8761 FW_CMD_MAX_TIMEOUT);
8766 /* BIOS boot headers */
/* All multi-byte fields in these on-flash structures are stored as raw
 * byte arrays (little-endian on flash) and must be accessed with
 * le16_to_cpu()/le32_to_cpu() after reassembly. */
8767 typedef struct pci_expansion_rom_header {
8768 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8769 u8 reserved[22]; /* Reserved per processor Architecture data */
8770 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8771 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8773 /* Legacy PCI Expansion ROM Header */
8774 typedef struct legacy_pci_expansion_rom_header {
8775 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8776 u8 size512; /* Current Image Size in units of 512 bytes */
8777 u8 initentry_point[4];
8778 u8 cksum; /* Checksum computed on the entire Image */
8779 u8 reserved[16]; /* Reserved */
8780 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8781 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8783 /* EFI PCI Expansion ROM Header */
8784 typedef struct efi_pci_expansion_rom_header {
8785 u8 signature[2]; // ROM signature. The value 0xaa55
8786 u8 initialization_size[2]; /* Units 512. Includes this header */
8787 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8788 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
8789 u8 efi_machine_type[2]; /* Machine type from EFI image header */
8790 u8 compression_type[2]; /* Compression type. */
8792 * Compression type definition
8795 * 0x2-0xFFFF: Reserved
8797 u8 reserved[8]; /* Reserved */
8798 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
8799 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8800 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8802 /* PCI Data Structure Format */
8803 typedef struct pcir_data_structure { /* PCI Data Structure */
8804 u8 signature[4]; /* Signature. The string "PCIR" */
8805 u8 vendor_id[2]; /* Vendor Identification */
8806 u8 device_id[2]; /* Device Identification */
8807 u8 vital_product[2]; /* Pointer to Vital Product Data */
8808 u8 length[2]; /* PCIR Data Structure Length */
8809 u8 revision; /* PCIR Data Structure Revision */
8810 u8 class_code[3]; /* Class Code */
8811 u8 image_length[2]; /* Image Length. Multiple of 512B */
8812 u8 code_revision[2]; /* Revision Level of Code/Data */
8813 u8 code_type; /* Code Type. */
8815 * PCI Expansion ROM Code Types
8816 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8817 * 0x01: Open Firmware standard for PCI. FCODE
8818 * 0x02: Hewlett-Packard PA RISC. HP reserved
8819 * 0x03: EFI Image. EFI
8820 * 0x04-0xFF: Reserved.
8822 u8 indicator; /* Indicator. Identifies the last image in the ROM */
8823 u8 reserved[2]; /* Reserved */
8824 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8826 /* BOOT constants */
8828 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8829 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8830 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8831 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8832 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8833 VENDOR_ID = 0x1425, /* Vendor ID */
8834 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8838 * modify_device_id - Modifies the device ID of the Boot BIOS image
8839 * @device_id: the device ID to write.
8840 * @boot_data: the boot image to modify.
8842 * Write the supplied device ID to the boot BIOS image.
8844 static void modify_device_id(int device_id, u8 *boot_data)
8846 legacy_pci_exp_rom_header_t *header;
8847 pcir_data_t *pcir_header;
8851 * Loop through all chained images and change the device ID's
/* cur_header is the byte offset of the current image within boot_data
 * (declaration elided from this listing). */
8854 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
/* NOTE(review): the u16/u32 pointer casts here and below access
 * byte-array fields; alignment is fine (offsets are even) but this
 * relies on a little-endian host for the device_id stores. */
8855 pcir_header = (pcir_data_t *) &boot_data[cur_header +
8856 le16_to_cpu(*(u16*)header->pcir_offset)];
8859 * Only modify the Device ID if code type is Legacy or HP.
8860 * 0x00: Okay to modify
8861 * 0x01: FCODE. Do not be modify
8862 * 0x03: Okay to modify
8863 * 0x04-0xFF: Do not modify
8865 if (pcir_header->code_type == 0x00) {
8870 * Modify Device ID to match current adapter
8872 *(u16*) pcir_header->device_id = device_id;
8875 * Set checksum temporarily to 0.
8876 * We will recalculate it later.
8878 header->cksum = 0x0;
8881 * Calculate and update checksum
8883 for (i = 0; i < (header->size512 * 512); i++)
8884 csum += (u8)boot_data[cur_header + i];
8887 * Invert summed value to create the checksum
8888 * Writing new checksum value directly to the boot data
/* Offset 7 is the cksum field of the legacy header; the sum of all
 * bytes of the image must be 0 mod 256. */
8890 boot_data[cur_header + 7] = -csum;
8892 } else if (pcir_header->code_type == 0x03) {
8895 * Modify Device ID to match current adapter
/* EFI images carry no byte checksum, so only the ID is patched. */
8897 *(u16*) pcir_header->device_id = device_id;
8903 * Check indicator element to identify if this is the last
/* Bit 7 of the PCIR indicator marks the final image in the ROM chain. */
8906 if (pcir_header->indicator & 0x80)
8910 * Move header pointer up to the next image in the ROM.
8912 cur_header += header->size512 * 512;
8917 * t4_load_boot - download boot flash
8918 * @adapter: the adapter
8919 * @boot_data: the boot image to write
8920 * @boot_addr: offset in flash to write boot_data
8923 * Write the supplied boot image to the card's serial flash.
8924 * The boot image has the following sections: a 28-byte header and the
8927 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8928 unsigned int boot_addr, unsigned int size)
8930 pci_exp_rom_header_t *header;
8932 pcir_data_t *pcir_header;
/* boot_addr is given in KB units; convert to a byte address in flash. */
8936 unsigned int boot_sector = (boot_addr * 1024 );
8937 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8940 * Make sure the boot image does not encroach on the firmware region
8942 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8943 CH_ERR(adap, "boot image encroaching on firmware region\n");
8948 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8949 * and Boot configuration data sections. These 3 boot sections span
8950 * sectors 0 to 7 in flash and live right before the FW image location.
8952 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8954 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8955 (boot_sector >> 16) + i - 1);
8958 * If size == 0 then we're simply erasing the FLASH sectors associated
8959 * with the on-adapter option ROM file
8961 if (ret || (size == 0))
8964 /* Get boot header */
8965 header = (pci_exp_rom_header_t *)boot_data;
8966 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8967 /* PCIR Data Structure */
8968 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8971 * Perform some primitive sanity testing to avoid accidentally
8972 * writing garbage over the boot sectors. We ought to check for
8973 * more but it's not worth it for now ...
8975 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8976 CH_ERR(adap, "boot image too small/large\n");
/* Signature/ID checks are skipped in diagnostics builds. */
8980 #ifndef CHELSIO_T4_DIAGS
8982 * Check BOOT ROM header signature
8984 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8985 CH_ERR(adap, "Boot image missing signature\n");
8990 * Check PCI header signature
8992 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8993 CH_ERR(adap, "PCI header missing signature\n");
8998 * Check Vendor ID matches Chelsio ID
9000 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
9001 CH_ERR(adap, "Vendor ID missing signature\n");
9007 * Retrieve adapter's device ID
9009 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
9010 /* Want to deal with PF 0 so I strip off PF 4 indicator */
9011 device_id = device_id & 0xf0ff;
9014 * Check PCIE Device ID
9016 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
9018 * Change the device ID in the Boot BIOS image to match
9019 * the Device ID of the current adapter.
9021 modify_device_id(device_id, boot_data);
9025 * Skip over the first SF_PAGE_SIZE worth of data and write it after
9026 * we finish copying the rest of the boot image. This will ensure
9027 * that the BIOS boot header will only be written if the boot image
9028 * was written in full.
9031 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
9032 addr += SF_PAGE_SIZE;
9033 boot_data += SF_PAGE_SIZE;
9034 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* Write the deferred first page (the header) last — see note above. */
9039 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
9040 (const u8 *)header, 0);
9044 CH_ERR(adap, "boot image download failed, error %d\n", ret);
9049 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9050 * @adapter: the adapter
9052 * Return the address within the flash where the OptionROM Configuration
9053 * is stored, or an error if the device FLASH is too small to contain
9054 * a OptionROM Configuration.
9056 static int t4_flash_bootcfg_addr(struct adapter *adapter)
9059 * If the device FLASH isn't large enough to hold a Firmware
9060 * Configuration File, return an error.
9062 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9065 return FLASH_BOOTCFG_START;
/* Write an OptionROM Configuration file to its reserved FLASH region.
 * Mirrors t4_load_cfg(): erase the region, then (if size != 0) write the
 * data one SF_PAGE_SIZE chunk at a time; size == 0 just clears the region. */
9068 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
9070 int ret, i, n, cfg_addr;
9072 unsigned int flash_cfg_start_sec;
9073 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9075 cfg_addr = t4_flash_bootcfg_addr(adap);
9080 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9082 if (size > FLASH_BOOTCFG_MAX_SIZE) {
9083 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9084 FLASH_BOOTCFG_MAX_SIZE);
9088 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9090 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9091 flash_cfg_start_sec + i - 1);
9094 * If size == 0 then we're simply erasing the FLASH sectors associated
9095 * with the on-adapter OptionROM Configuration File.
9097 if (ret || size == 0)
9100 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9101 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9102 if ( (size - i) < SF_PAGE_SIZE)
9106 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9110 addr += SF_PAGE_SIZE;
9111 cfg_data += SF_PAGE_SIZE;
9116 CH_ERR(adap, "boot config data %s failed %d\n",
9117 (size == 0 ? "clear" : "download"), ret);
9122 * t4_set_filter_mode - configure the optional components of filter tuples
9123 * @adap: the adapter
9124 * @mode_map: a bitmap selecting which optional filter components to enable
9126 * Sets the filter mode by selecting the optional components to enable
9127 * in filter tuples. Returns 0 on success and a negative error if the
9128 * requested mode needs more bits than are available for optional
9131 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
/* width[i] is the tuple-bit cost of optional field i (S_FCOE..S_FRAGMENTATION). */
9133 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Sum the widths of all requested fields and reject the mode if they
 * exceed the bits available for optional filter components. */
9137 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9138 if (mode_map & (1 << i))
9140 if (nbits > FILTER_OPT_LEN)
/* Newer firmware requires TP indirect registers to be written via the
 * FW LDST mailbox; otherwise use direct TP_PIO access. */
9142 if (t4_use_ldst(adap))
9143 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
9145 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
9146 1, A_TP_VLAN_PRI_MAP);
/* Refresh the cached filter-mode/ingress-config state to match HW. */
9147 read_filter_mode_and_ingress_config(adap);
9153 * t4_clr_port_stats - clear port statistics
9154 * @adap: the adapter
9155 * @idx: the port index
9157 * Clear HW statistics for the given port.
9159 void t4_clr_port_stats(struct adapter *adap, int idx)
9162 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* MPS port statistics live at a chip-revision-dependent base address. */
9166 port_base_addr = PORT_BASE(idx);
9168 port_base_addr = T5_PORT_BASE(idx);
/* Stat counters are 64-bit register pairs, hence the stride of 8. */
9170 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9171 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9172 t4_write_reg(adap, port_base_addr + i, 0);
9173 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9174 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9175 t4_write_reg(adap, port_base_addr + i, 0);
/* Also clear the buffer-group drop/truncate counters this port owns. */
9176 for (i = 0; i < 4; i++)
9177 if (bgmap & (1 << i)) {
9179 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9181 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9186 * t4_i2c_rd - read I2C data from adapter
9187 * @adap: the adapter
9188 * @port: Port number if per-port device; <0 if not
9189 * @devid: per-port device ID or absolute device ID
9190 * @offset: byte offset into device I2C space
9191 * @len: byte length of I2C space data
9192 * @buf: buffer in which to return I2C data
9194 * Reads the I2C data from the indicated device and location.
9196 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9197 int port, unsigned int devid,
9198 unsigned int offset, unsigned int len,
9202 struct fw_ldst_cmd ldst;
/* A single LDST command can carry at most sizeof(ldst.u.i2c.data) bytes. */
9208 len > sizeof ldst.u.i2c.data)
9211 memset(&ldst, 0, sizeof ldst);
9212 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9213 ldst.op_to_addrspace =
9214 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9218 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff means "not a per-port device". */
9219 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9220 ldst.u.i2c.did = devid;
9221 ldst.u.i2c.boffset = offset;
9222 ldst.u.i2c.blen = len;
/* The firmware returns the read bytes in the same command structure. */
9223 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9225 memcpy(buf, ldst.u.i2c.data, len);
9230 * t4_i2c_wr - write I2C data to adapter
9231 * @adap: the adapter
9232 * @port: Port number if per-port device; <0 if not
9233 * @devid: per-port device ID or absolute device ID
9234 * @offset: byte offset into device I2C space
9235 * @len: byte length of I2C space data
9236 * @buf: buffer containing new I2C data
9238 * Write the I2C data to the indicated device and location.
9240 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9241 int port, unsigned int devid,
9242 unsigned int offset, unsigned int len,
9246 struct fw_ldst_cmd ldst;
/* A single LDST command can carry at most sizeof(ldst.u.i2c.data) bytes. */
9251 len > sizeof ldst.u.i2c.data)
9254 memset(&ldst, 0, sizeof ldst);
9255 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9256 ldst.op_to_addrspace =
9257 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9261 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff means "not a per-port device". */
9262 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9263 ldst.u.i2c.did = devid;
9264 ldst.u.i2c.boffset = offset;
9265 ldst.u.i2c.blen = len;
9266 memcpy(ldst.u.i2c.data, buf, len);
9267 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9271 * t4_sge_ctxt_rd - read an SGE context through FW
9272 * @adap: the adapter
9273 * @mbox: mailbox to use for the FW command
9274 * @cid: the context id
9275 * @ctype: the context type
9276 * @data: where to store the context data
9278 * Issues a FW command through the given mailbox to read an SGE context.
9280 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9281 enum ctxt_type ctype, u32 *data)
9284 struct fw_ldst_cmd c;
/* Map the context type to the matching FW LDST address space; ret is
 * reused afterwards for the mailbox return code. */
9286 if (ctype == CTXT_EGRESS)
9287 ret = FW_LDST_ADDRSPC_SGE_EGRC;
9288 else if (ctype == CTXT_INGRESS)
9289 ret = FW_LDST_ADDRSPC_SGE_INGC;
9290 else if (ctype == CTXT_FLM)
9291 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9293 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9295 memset(&c, 0, sizeof(c));
9296 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9297 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9298 V_FW_LDST_CMD_ADDRSPACE(ret));
9299 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9300 c.u.idctxt.physid = cpu_to_be32(cid);
9302 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success, unpack the six 32-bit words of context data. */
9304 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9305 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9306 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9307 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9308 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9309 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9315 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9316 * @adap: the adapter
9317 * @cid: the context id
9318 * @ctype: the context type
9319 * @data: where to store the context data
9321 * Reads an SGE context directly, bypassing FW. This is only for
9322 * debugging when FW is unavailable.
9324 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* Kick off a direct context read and poll BUSY until done. */
9329 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9330 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
/* Copy the six 32-bit context data registers out on success. */
9332 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9333 *data++ = t4_read_reg(adap, i);
/* Configure the FW traffic scheduler: issue an FW_SCHED_CMD with the
 * CONFIG sub-command carrying the scheduler @type and @minmaxen setting. */
9337 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9340 struct fw_sched_cmd cmd;
9342 memset(&cmd, 0, sizeof(cmd));
9343 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9346 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9348 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9349 cmd.u.config.type = type;
9350 cmd.u.config.minmaxen = minmaxen;
9352 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/* Program the parameters of one FW traffic-scheduling class: issue an
 * FW_SCHED_CMD with the PARAMS sub-command. Single-byte fields go in
 * as-is; min/max rates are 32-bit BE, weight/pktsize 16-bit BE. */
9356 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9357 int rateunit, int ratemode, int channel, int cl,
9358 int minrate, int maxrate, int weight, int pktsize,
9361 struct fw_sched_cmd cmd;
9363 memset(&cmd, 0, sizeof(cmd));
9364 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9367 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9369 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9370 cmd.u.params.type = type;
9371 cmd.u.params.level = level;
9372 cmd.u.params.mode = mode;
9373 cmd.u.params.ch = channel;
9374 cmd.u.params.cl = cl;
9375 cmd.u.params.unit = rateunit;
9376 cmd.u.params.rate = ratemode;
9377 cmd.u.params.min = cpu_to_be32(minrate);
9378 cmd.u.params.max = cpu_to_be32(maxrate);
9379 cmd.u.params.weight = cpu_to_be16(weight);
9380 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9382 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9387 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9388 * @adapter: the adapter
9389 * @mbox: mailbox to use for the FW command
9390 * @pf: the PF owning the queue
9391 * @vf: the VF owning the queue
9392 * @timeout: watchdog timeout in ms
9393 * @action: watchdog timer / action
9395 * There are separate watchdog timers for each possible watchdog
9396 * action. Configure one of the watchdog timers by setting a non-zero
9397 * timeout. Disable a watchdog timer by using a timeout of zero.
9399 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9400 unsigned int pf, unsigned int vf,
9401 unsigned int timeout, unsigned int action)
9403 struct fw_watchdog_cmd wdog;
9407 * The watchdog command expects a timeout in units of 10ms so we need
9408 * to convert it here (via rounding) and force a minimum of one 10ms
9409 * "tick" if the timeout is non-zero but the conversion results in 0
9412 ticks = (timeout + 5)/10;
9413 if (timeout && !ticks)
9416 memset(&wdog, 0, sizeof wdog);
9417 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9420 V_FW_PARAMS_CMD_PFN(pf) |
9421 V_FW_PARAMS_CMD_VFN(vf));
9422 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9423 wdog.timeout = cpu_to_be32(ticks);
9424 wdog.action = cpu_to_be32(action);
9426 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
/* Query the firmware device-log verbosity: issue a read FW_DEVLOG_CMD
 * and return the current level through @level on success. */
9429 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9431 struct fw_devlog_cmd devlog_cmd;
9434 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9435 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9436 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9437 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9438 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9439 sizeof(devlog_cmd), &devlog_cmd);
/* The level field is a single byte, so no endian conversion is needed. */
9443 *level = devlog_cmd.level;
9447 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9449 struct fw_devlog_cmd devlog_cmd;
9451 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9452 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9455 devlog_cmd.level = level;
9456 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9457 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9458 sizeof(devlog_cmd), &devlog_cmd);