2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) do { \
42 pause("t4hw", (x) * hz / 1000); \
46 * t4_wait_op_done_val - wait until an operation is completed
47 * @adapter: the adapter performing the operation
48 * @reg: the register to check for completion
49 * @mask: a single-bit field within @reg that indicates completion
50 * @polarity: the value of the field when the operation is completed
51 * @attempts: number of check iterations
52 * @delay: delay in usecs between iterations
53 * @valp: where to store the value of the register at completion time
55 * Wait until an operation is completed by checking a bit in a register
56 * up to @attempts times. If @valp is not NULL the value of the register
57 * at the time it indicated completion is stored there. Returns 0 if the
58 * operation completes and -EAGAIN otherwise.
60 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61 int polarity, int attempts, int delay, u32 *valp)
64 u32 val = t4_read_reg(adapter, reg);
66 if (!!(val & mask) == polarity) {
78 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
79 int polarity, int attempts, int delay)
81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
86 * t4_set_reg_field - set a register field to a value
87 * @adapter: the adapter to program
88 * @addr: the register address
89 * @mask: specifies the portion of the register to modify
90 * @val: the new value for the register field
92 * Sets a register field specified by the supplied mask to the
95 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
98 u32 v = t4_read_reg(adapter, addr) & ~mask;
100 t4_write_reg(adapter, addr, v | val);
101 (void) t4_read_reg(adapter, addr); /* flush */
105 * t4_read_indirect - read indirectly addressed registers
107 * @addr_reg: register holding the indirect address
108 * @data_reg: register holding the value of the indirect register
109 * @vals: where the read register values are stored
110 * @nregs: how many indirect registers to read
111 * @start_idx: index of first indirect register to read
113 * Reads registers that are accessed indirectly through an address/data
116 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117 unsigned int data_reg, u32 *vals,
118 unsigned int nregs, unsigned int start_idx)
121 t4_write_reg(adap, addr_reg, start_idx);
122 *vals++ = t4_read_reg(adap, data_reg);
128 * t4_write_indirect - write indirectly addressed registers
130 * @addr_reg: register holding the indirect addresses
131 * @data_reg: register holding the value for the indirect registers
132 * @vals: values to write
133 * @nregs: how many indirect registers to write
134 * @start_idx: address of first indirect register to write
136 * Writes a sequential block of registers that are accessed indirectly
137 * through an address/data register pair.
139 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140 unsigned int data_reg, const u32 *vals,
141 unsigned int nregs, unsigned int start_idx)
144 t4_write_reg(adap, addr_reg, start_idx++);
145 t4_write_reg(adap, data_reg, *vals++);
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism. This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 * the backdoor registers on a regular basis and we can end up
157 * conflicting with it's uses!
159 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
161 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
164 if (chip_id(adap) <= CHELSIO_T5)
172 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
173 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
176 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
177 * Configuration Space read. (None of the other fields matter when
178 * F_ENABLE is 0 so a simple register write is easier than a
179 * read-modify-write via t4_set_reg_field().)
181 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
187 * t4_report_fw_error - report firmware error
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
194 static void t4_report_fw_error(struct adapter *adap)
196 static const char *const reason[] = {
197 "Crash", /* PCIE_FW_EVAL_CRASH */
198 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
199 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
200 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
201 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
203 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
204 "Reserved", /* reserved */
208 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209 if (pcie_fw & F_PCIE_FW_ERR)
210 CH_ERR(adap, "Firmware reports adapter error: %s\n",
211 reason[G_PCIE_FW_EVAL(pcie_fw)]);
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
217 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
220 for ( ; nflit; nflit--, mbox_addr += 8)
221 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
225 * Handle a FW assertion reported in a mailbox.
227 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
230 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231 asrt->u.assert.filename_0_7,
232 be32_to_cpu(asrt->u.assert.line),
233 be32_to_cpu(asrt->u.assert.x),
234 be32_to_cpu(asrt->u.assert.y));
237 #define X_CIM_PF_NOACCESS 0xeeeeeeee
239 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
241 * @mbox: index of the mailbox to use
242 * @cmd: the command to write
243 * @size: command length in bytes
244 * @rpl: where to optionally store the reply
245 * @sleep_ok: if true we may sleep while awaiting command completion
246 * @timeout: time to wait for command to finish before timing out
247 * (negative implies @sleep_ok=false)
249 * Sends the given command to FW through the selected mailbox and waits
250 * for the FW to execute the command. If @rpl is not %NULL it is used to
251 * store the FW's reply to the command. The command and its optional
252 * reply are of the same length. Some FW commands like RESET and
253 * INITIALIZE can take a considerable amount of time to execute.
254 * @sleep_ok determines whether we may sleep while awaiting the response.
255 * If sleeping is allowed we use progressive backoff otherwise we spin.
256 * Note that passing in a negative @timeout is an alternate mechanism
257 * for specifying @sleep_ok=false. This is useful when a higher level
258 * interface allows for specification of @timeout but not @sleep_ok ...
260 * The return value is 0 on success or a negative errno on failure. A
261 * failure can happen either because we are not able to execute the
262 * command or FW executes it but signals an error. In the latter case
263 * the return value is the error code indicated by FW (negated).
265 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
266 int size, void *rpl, bool sleep_ok, int timeout)
269 * We delay in small increments at first in an effort to maintain
270 * responsiveness for simple, fast executing commands but then back
271 * off to larger delays to a maximum retry delay.
273 static const int delay[] = {
274 1, 1, 3, 5, 10, 10, 20, 50, 100
278 int i, ms, delay_idx, ret;
279 const __be64 *p = cmd;
280 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
281 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
283 __be64 cmd_rpl[MBOX_LEN/8];
286 if ((size & 15) || size > MBOX_LEN)
289 if (adap->flags & IS_VF) {
291 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
293 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
294 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
298 * If we have a negative timeout, that implies that we can't sleep.
306 * Attempt to gain access to the mailbox.
308 for (i = 0; i < 4; i++) {
309 ctl = t4_read_reg(adap, ctl_reg);
311 if (v != X_MBOWNER_NONE)
316 * If we were unable to gain access, dequeue ourselves from the
317 * mailbox atomic access list and report the error to our caller.
319 if (v != X_MBOWNER_PL) {
320 t4_report_fw_error(adap);
321 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
326 * If we gain ownership of the mailbox and there's a "valid" message
327 * in it, this is likely an asynchronous error message from the
328 * firmware. So we'll report that and then proceed on with attempting
329 * to issue our own command ... which may well fail if the error
330 * presaged the firmware crashing ...
332 if (ctl & F_MBMSGVALID) {
333 CH_ERR(adap, "found VALID command in mbox %u: "
334 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
335 (unsigned long long)t4_read_reg64(adap, data_reg),
336 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
337 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
338 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
339 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
340 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
341 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
342 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
346 * Copy in the new mailbox command and send it on its way ...
348 for (i = 0; i < size; i += 8, p++)
349 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
351 if (adap->flags & IS_VF) {
353 * For the VFs, the Mailbox Data "registers" are
354 * actually backed by T4's "MA" interface rather than
355 * PL Registers (as is the case for the PFs). Because
356 * these are in different coherency domains, the write
357 * to the VF's PL-register-backed Mailbox Control can
358 * race in front of the writes to the MA-backed VF
359 * Mailbox Data "registers". So we need to do a
360 * read-back on at least one byte of the VF Mailbox
361 * Data registers before doing the write to the VF
362 * Mailbox Control register.
364 t4_read_reg(adap, data_reg);
367 CH_DUMP_MBOX(adap, mbox, data_reg);
369 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
370 t4_read_reg(adap, ctl_reg); /* flush write */
376 * Loop waiting for the reply; bail out if we time out or the firmware
380 for (i = 0; i < timeout; i += ms) {
381 if (!(adap->flags & IS_VF)) {
382 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
383 if (pcie_fw & F_PCIE_FW_ERR)
387 ms = delay[delay_idx]; /* last element may repeat */
388 if (delay_idx < ARRAY_SIZE(delay) - 1)
395 v = t4_read_reg(adap, ctl_reg);
396 if (v == X_CIM_PF_NOACCESS)
398 if (G_MBOWNER(v) == X_MBOWNER_PL) {
399 if (!(v & F_MBMSGVALID)) {
400 t4_write_reg(adap, ctl_reg,
401 V_MBOWNER(X_MBOWNER_NONE));
406 * Retrieve the command reply and release the mailbox.
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
409 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
411 CH_DUMP_MBOX(adap, mbox, data_reg);
413 res = be64_to_cpu(cmd_rpl[0]);
414 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
415 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
416 res = V_FW_CMD_RETVAL(EIO);
418 memcpy(rpl, cmd_rpl, size);
419 return -G_FW_CMD_RETVAL((int)res);
424 * We timed out waiting for a reply to our mailbox command. Report
425 * the error and also check to see if the firmware reported any
428 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
429 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
430 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap);
437 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
438 void *rpl, bool sleep_ok)
440 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
441 sleep_ok, FW_CMD_MAX_TIMEOUT);
445 static int t4_edc_err_read(struct adapter *adap, int idx)
447 u32 edc_ecc_err_addr_reg;
448 u32 edc_bist_status_rdata_reg;
451 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
454 if (idx != 0 && idx != 1) {
455 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
459 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
460 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
463 "edc%d err addr 0x%x: 0x%x.\n",
464 idx, edc_ecc_err_addr_reg,
465 t4_read_reg(adap, edc_ecc_err_addr_reg));
467 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
468 edc_bist_status_rdata_reg,
469 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
470 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
471 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
472 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
473 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
474 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
475 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
476 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
477 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
483 * t4_mc_read - read from MC through backdoor accesses
485 * @idx: which MC to access
486 * @addr: address of first byte requested
487 * @data: 64 bytes of data containing the requested address
488 * @ecc: where to store the corresponding 64-bit ECC word
490 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
491 * that covers the requested address @addr. If @parity is not %NULL it
492 * is assigned the 64-bit ECC word for the read data.
494 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
497 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
498 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
501 mc_bist_cmd_reg = A_MC_BIST_CMD;
502 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
503 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
504 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
505 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
507 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
508 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
509 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
510 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
512 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
516 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
518 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
519 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
520 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
521 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
522 F_START_BIST | V_BIST_CMD_GAP(1));
523 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
527 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
529 for (i = 15; i >= 0; i--)
530 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
532 *ecc = t4_read_reg64(adap, MC_DATA(16));
538 * t4_edc_read - read from EDC through backdoor accesses
540 * @idx: which EDC to access
541 * @addr: address of first byte requested
542 * @data: 64 bytes of data containing the requested address
543 * @ecc: where to store the corresponding 64-bit ECC word
545 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
546 * that covers the requested address @addr. If @parity is not %NULL it
547 * is assigned the 64-bit ECC word for the read data.
549 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
552 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
553 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
556 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
557 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
558 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
559 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
561 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
565 * These macro are missing in t4_regs.h file.
566 * Added temporarily for testing.
568 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
569 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
570 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
571 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
572 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
573 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
575 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
581 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
583 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
584 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
585 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
586 t4_write_reg(adap, edc_bist_cmd_reg,
587 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
588 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
592 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
594 for (i = 15; i >= 0; i--)
595 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
597 *ecc = t4_read_reg64(adap, EDC_DATA(16));
603 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
605 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
606 * @addr: address within indicated memory type
607 * @len: amount of memory to read
608 * @buf: host memory buffer
610 * Reads an [almost] arbitrary memory region in the firmware: the
611 * firmware memory address, length and host buffer must be aligned on
612 * 32-bit boudaries. The memory is returned as a raw byte sequence from
613 * the firmware's memory. If this memory contains data structures which
614 * contain multi-byte integers, it's the callers responsibility to
615 * perform appropriate byte order conversions.
617 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
620 u32 pos, start, end, offset;
624 * Argument sanity checks ...
626 if ((addr & 0x3) || (len & 0x3))
630 * The underlaying EDC/MC read routines read 64 bytes at a time so we
631 * need to round down the start and round up the end. We'll start
632 * copying out of the first line at (addr - start) a word at a time.
634 start = addr & ~(64-1);
635 end = (addr + len + 64-1) & ~(64-1);
636 offset = (addr - start)/sizeof(__be32);
638 for (pos = start; pos < end; pos += 64, offset = 0) {
642 * Read the chip's memory block and bail if there's an error.
644 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
645 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
647 ret = t4_edc_read(adap, mtype, pos, data, NULL);
652 * Copy the data into the caller's memory buffer.
654 while (offset < 16 && len > 0) {
655 *buf++ = data[offset++];
656 len -= sizeof(__be32);
664 * Return the specified PCI-E Configuration Space register from our Physical
665 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
666 * since we prefer to let the firmware own all of these registers, but if that
667 * fails we go for it directly ourselves.
669 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
673 * If fw_attach != 0, construct and send the Firmware LDST Command to
674 * retrieve the specified PCI-E Configuration Space register.
676 if (drv_fw_attach != 0) {
677 struct fw_ldst_cmd ldst_cmd;
680 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
681 ldst_cmd.op_to_addrspace =
682 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
685 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
686 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
687 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
688 ldst_cmd.u.pcie.ctrl_to_fn =
689 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
690 ldst_cmd.u.pcie.r = reg;
693 * If the LDST Command succeeds, return the result, otherwise
694 * fall through to reading it directly ourselves ...
696 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
699 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
701 CH_WARN(adap, "Firmware failed to return "
702 "Configuration Space register %d, err = %d\n",
707 * Read the desired Configuration Space register via the PCI-E
708 * Backdoor mechanism.
710 return t4_hw_pci_read_cfg4(adap, reg);
714 * t4_get_regs_len - return the size of the chips register set
715 * @adapter: the adapter
717 * Returns the size of the chip's BAR0 register space.
719 unsigned int t4_get_regs_len(struct adapter *adapter)
721 unsigned int chip_version = chip_id(adapter);
723 switch (chip_version) {
725 if (adapter->flags & IS_VF)
726 return FW_T4VF_REGMAP_SIZE;
727 return T4_REGMAP_SIZE;
731 if (adapter->flags & IS_VF)
732 return FW_T4VF_REGMAP_SIZE;
733 return T5_REGMAP_SIZE;
737 "Unsupported chip version %d\n", chip_version);
742 * t4_get_regs - read chip registers into provided buffer
744 * @buf: register buffer
745 * @buf_size: size (in bytes) of register buffer
747 * If the provided register buffer isn't large enough for the chip's
748 * full register range, the register dump will be truncated to the
749 * register buffer's size.
751 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
753 static const unsigned int t4_reg_ranges[] = {
1211 static const unsigned int t4vf_reg_ranges[] = {
1212 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1213 VF_MPS_REG(A_MPS_VF_CTL),
1214 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1215 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1216 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1217 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1218 FW_T4VF_MBDATA_BASE_ADDR,
1219 FW_T4VF_MBDATA_BASE_ADDR +
1220 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1223 static const unsigned int t5_reg_ranges[] = {
1998 static const unsigned int t5vf_reg_ranges[] = {
1999 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2000 VF_MPS_REG(A_MPS_VF_CTL),
2001 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2002 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2003 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2004 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2005 FW_T4VF_MBDATA_BASE_ADDR,
2006 FW_T4VF_MBDATA_BASE_ADDR +
2007 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2010 static const unsigned int t6_reg_ranges[] = {
2587 static const unsigned int t6vf_reg_ranges[] = {
2588 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2589 VF_MPS_REG(A_MPS_VF_CTL),
2590 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2591 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2592 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2593 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2594 FW_T6VF_MBDATA_BASE_ADDR,
2595 FW_T6VF_MBDATA_BASE_ADDR +
2596 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2599 u32 *buf_end = (u32 *)(buf + buf_size);
2600 const unsigned int *reg_ranges;
2601 int reg_ranges_size, range;
2602 unsigned int chip_version = chip_id(adap);
2605 * Select the right set of register ranges to dump depending on the
2606 * adapter chip type.
2608 switch (chip_version) {
2610 if (adap->flags & IS_VF) {
2611 reg_ranges = t4vf_reg_ranges;
2612 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2614 reg_ranges = t4_reg_ranges;
2615 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2620 if (adap->flags & IS_VF) {
2621 reg_ranges = t5vf_reg_ranges;
2622 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2624 reg_ranges = t5_reg_ranges;
2625 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2630 if (adap->flags & IS_VF) {
2631 reg_ranges = t6vf_reg_ranges;
2632 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2634 reg_ranges = t6_reg_ranges;
2635 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2641 "Unsupported chip version %d\n", chip_version);
2646 * Clear the register buffer and insert the appropriate register
2647 * values selected by the above register ranges.
2649 memset(buf, 0, buf_size);
2650 for (range = 0; range < reg_ranges_size; range += 2) {
2651 unsigned int reg = reg_ranges[range];
2652 unsigned int last_reg = reg_ranges[range + 1];
2653 u32 *bufp = (u32 *)(buf + reg);
2656 * Iterate across the register range filling in the register
2657 * buffer but don't write past the end of the register buffer.
2659 while (reg <= last_reg && bufp < buf_end) {
2660 *bufp++ = t4_read_reg(adap, reg);
2667 * Partial EEPROM Vital Product Data structure. Includes only the ID and
2679 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2681 #define EEPROM_DELAY 10 /* 10us per poll spin */
2682 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2684 #define EEPROM_STAT_ADDR 0x7bfc
2685 #define VPD_BASE 0x400
2686 #define VPD_BASE_OLD 0
2687 #define VPD_LEN 1024
2688 #define VPD_INFO_FLD_HDR_SIZE 3
2689 #define CHELSIO_VPD_UNIQUE_ID 0x82
2692 * Small utility function to wait till any outstanding VPD Access is complete.
2693 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2694 * VPD Access in flight. This allows us to handle the problem of having a
2695 * previous VPD Access time out and prevent an attempt to inject a new VPD
2696 * Request before any in-flight VPD reguest has completed.
2698 static int t4_seeprom_wait(struct adapter *adapter)
2700 unsigned int base = adapter->params.pci.vpd_cap_addr;
2704 * If no VPD Access is in flight, we can just return success right
2707 if (!adapter->vpd_busy)
2711 * Poll the VPD Capability Address/Flag register waiting for it
2712 * to indicate that the operation is complete.
2714 max_poll = EEPROM_MAX_POLL;
2718 udelay(EEPROM_DELAY);
2719 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2722 * If the operation is complete, mark the VPD as no longer
2723 * busy and return success.
2725 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2726 adapter->vpd_busy = 0;
2729 } while (--max_poll);
2732 * Failure! Note that we leave the VPD Busy status set in order to
2733 * avoid pushing a new VPD Access request into the VPD Capability till
2734 * the current operation eventually succeeds. It's a bug to issue a
2735 * new request when an existing request is in flight and will result
2736 * in corrupt hardware state.
2742 * t4_seeprom_read - read a serial EEPROM location
2743 * @adapter: adapter to read
2744 * @addr: EEPROM virtual address
2745 * @data: where to store the read data
2747 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2748 * VPD capability. Note that this function must be called with a virtual
2751 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2753 unsigned int base = adapter->params.pci.vpd_cap_addr;
2757 * VPD Accesses must alway be 4-byte aligned!
2759 if (addr >= EEPROMVSIZE || (addr & 3))
2763 * Wait for any previous operation which may still be in flight to
2766 ret = t4_seeprom_wait(adapter);
2768 CH_ERR(adapter, "VPD still busy from previous operation\n");
2773 * Issue our new VPD Read request, mark the VPD as being busy and wait
2774 * for our request to complete. If it doesn't complete, note the
2775 * error and return it to our caller. Note that we do not reset the
2778 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2779 adapter->vpd_busy = 1;
2780 adapter->vpd_flag = PCI_VPD_ADDR_F;
2781 ret = t4_seeprom_wait(adapter);
2783 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2788 * Grab the returned data, swizzle it into our endianess and
2791 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2792 *data = le32_to_cpu(*data);
2797 * t4_seeprom_write - write a serial EEPROM location
2798 * @adapter: adapter to write
2799 * @addr: virtual EEPROM address
2800 * @data: value to write
2802 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2803 * VPD capability. Note that this function must be called with a virtual
2806 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2808 unsigned int base = adapter->params.pci.vpd_cap_addr;
2814 * VPD Accesses must alway be 4-byte aligned!
2816 if (addr >= EEPROMVSIZE || (addr & 3))
2820 * Wait for any previous operation which may still be in flight to
2823 ret = t4_seeprom_wait(adapter);
2825 CH_ERR(adapter, "VPD still busy from previous operation\n");
2830 * Issue our new VPD Read request, mark the VPD as being busy and wait
2831 * for our request to complete. If it doesn't complete, note the
2832 * error and return it to our caller. Note that we do not reset the
2835 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2837 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2838 (u16)addr | PCI_VPD_ADDR_F);
2839 adapter->vpd_busy = 1;
2840 adapter->vpd_flag = 0;
2841 ret = t4_seeprom_wait(adapter);
2843 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2848 * Reset PCI_VPD_DATA register after a transaction and wait for our
2849 * request to complete. If it doesn't complete, return error.
2851 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2852 max_poll = EEPROM_MAX_POLL;
2854 udelay(EEPROM_DELAY);
2855 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2856 } while ((stats_reg & 0x1) && --max_poll);
2860 /* Return success! */
2865 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2866 * @phys_addr: the physical EEPROM address
2867 * @fn: the PCI function number
2868 * @sz: size of function-specific area
2870 * Translate a physical EEPROM address to virtual. The first 1K is
2871 * accessed through virtual addresses starting at 31K, the rest is
2872 * accessed through virtual addresses starting at 0.
2874 * The mapping is as follows:
2875 * [0..1K) -> [31K..32K)
2876 * [1K..1K+A) -> [ES-A..ES)
2877 * [1K+A..ES) -> [0..ES-A-1K)
2879 * where A = @fn * @sz, and ES = EEPROM size.
2881 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2884 if (phys_addr < 1024)
2885 return phys_addr + (31 << 10);
2886 if (phys_addr < 1024 + fn)
2887 return EEPROMSIZE - fn + phys_addr - 1024;
2888 if (phys_addr < EEPROMSIZE)
2889 return phys_addr - 1024 - fn;
2894 * t4_seeprom_wp - enable/disable EEPROM write protection
2895 * @adapter: the adapter
2896 * @enable: whether to enable or disable write protection
2898 * Enables or disables write protection on the serial EEPROM.
2900 int t4_seeprom_wp(struct adapter *adapter, int enable)
2902 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2906 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2907 * @v: Pointer to buffered vpd data structure
2908 * @kw: The keyword to search for
2910 * Returns the value of the information field keyword or
2911 * -ENOENT otherwise.
2913 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2916 unsigned int offset , len;
2917 const u8 *buf = (const u8 *)v;
2918 const u8 *vpdr_len = &v->vpdr_len[0];
2919 offset = sizeof(struct t4_vpd_hdr);
2920 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2922 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2926 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2927 if(memcmp(buf + i , kw , 2) == 0){
2928 i += VPD_INFO_FLD_HDR_SIZE;
2932 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2940 * get_vpd_params - read VPD parameters from VPD EEPROM
2941 * @adapter: adapter to read
2942 * @p: where to store the parameters
2943 * @vpd: caller provided temporary space to read the VPD into
2945 * Reads card parameters stored in VPD EEPROM.
2947 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2953 const struct t4_vpd_hdr *v;
2956 * Card information normally starts at VPD_BASE but early cards had
2959 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2964 * The VPD shall have a unique identifier specified by the PCI SIG.
2965 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2966 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2967 * is expected to automatically put this entry at the
2968 * beginning of the VPD.
2970 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2972 for (i = 0; i < VPD_LEN; i += 4) {
2973 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2977 v = (const struct t4_vpd_hdr *)vpd;
2979 #define FIND_VPD_KW(var,name) do { \
2980 var = get_vpd_keyword_val(v , name); \
2982 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2987 FIND_VPD_KW(i, "RV");
2988 for (csum = 0; i >= 0; i--)
2993 "corrupted VPD EEPROM, actual csum %u\n", csum);
2997 FIND_VPD_KW(ec, "EC");
2998 FIND_VPD_KW(sn, "SN");
2999 FIND_VPD_KW(pn, "PN");
3000 FIND_VPD_KW(na, "NA");
3003 memcpy(p->id, v->id_data, ID_LEN);
3005 memcpy(p->ec, vpd + ec, EC_LEN);
3007 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3008 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3010 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3011 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3012 strstrip((char *)p->pn);
3013 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3014 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3015 strstrip((char *)p->na);
3020 /* serial flash and firmware constants and flash config file constants */
3022 SF_ATTEMPTS = 10, /* max retries for SF operations */
3024 /* flash command opcodes */
3025 SF_PROG_PAGE = 2, /* program page */
3026 SF_WR_DISABLE = 4, /* disable writes */
3027 SF_RD_STATUS = 5, /* read status register */
3028 SF_WR_ENABLE = 6, /* enable writes */
3029 SF_RD_DATA_FAST = 0xb, /* read flash */
3030 SF_RD_ID = 0x9f, /* read ID */
3031 SF_ERASE_SECTOR = 0xd8, /* erase sector */
3035 * sf1_read - read data from the serial flash
3036 * @adapter: the adapter
3037 * @byte_cnt: number of bytes to read
3038 * @cont: whether another operation will be chained
3039 * @lock: whether to lock SF for PL access only
3040 * @valp: where to store the read data
3042 * Reads up to 4 bytes of data from the serial flash. The location of
3043 * the read needs to be specified prior to calling this by issuing the
3044 * appropriate commands to the serial flash.
3046 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3047 int lock, u32 *valp)
3051 if (!byte_cnt || byte_cnt > 4)
3053 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3055 t4_write_reg(adapter, A_SF_OP,
3056 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3057 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3059 *valp = t4_read_reg(adapter, A_SF_DATA);
3064 * sf1_write - write data to the serial flash
3065 * @adapter: the adapter
3066 * @byte_cnt: number of bytes to write
3067 * @cont: whether another operation will be chained
3068 * @lock: whether to lock SF for PL access only
3069 * @val: value to write
3071 * Writes up to 4 bytes of data to the serial flash. The location of
3072 * the write needs to be specified prior to calling this by issuing the
3073 * appropriate commands to the serial flash.
3075 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3078 if (!byte_cnt || byte_cnt > 4)
3080 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3082 t4_write_reg(adapter, A_SF_DATA, val);
3083 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3084 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3085 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3089 * flash_wait_op - wait for a flash operation to complete
3090 * @adapter: the adapter
3091 * @attempts: max number of polls of the status register
3092 * @delay: delay between polls in ms
3094 * Wait for a flash operation to complete by polling the status register.
3096 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3102 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3103 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3107 if (--attempts == 0)
3115 * t4_read_flash - read words from serial flash
3116 * @adapter: the adapter
3117 * @addr: the start address for the read
3118 * @nwords: how many 32-bit words to read
3119 * @data: where to store the read data
3120 * @byte_oriented: whether to store data as bytes or as words
3122 * Read the specified number of 32-bit words from the serial flash.
3123 * If @byte_oriented is set the read data is stored as a byte array
3124 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3125 * natural endianness.
3127 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3128 unsigned int nwords, u32 *data, int byte_oriented)
3132 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3135 addr = swab32(addr) | SF_RD_DATA_FAST;
3137 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3138 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3141 for ( ; nwords; nwords--, data++) {
3142 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3144 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3148 *data = (__force __u32)(cpu_to_be32(*data));
3154 * t4_write_flash - write up to a page of data to the serial flash
3155 * @adapter: the adapter
3156 * @addr: the start address to write
3157 * @n: length of data to write in bytes
3158 * @data: the data to write
3159 * @byte_oriented: whether to store data as bytes or as words
3161 * Writes up to a page of data (256 bytes) to the serial flash starting
3162 * at the given address. All the data must be written to the same page.
3163 * If @byte_oriented is set the write data is stored as byte stream
3164 * (i.e. matches what on disk), otherwise in big-endian.
3166 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3167 unsigned int n, const u8 *data, int byte_oriented)
3170 u32 buf[SF_PAGE_SIZE / 4];
3171 unsigned int i, c, left, val, offset = addr & 0xff;
3173 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3176 val = swab32(addr) | SF_PROG_PAGE;
3178 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3179 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3182 for (left = n; left; left -= c) {
3184 for (val = 0, i = 0; i < c; ++i)
3185 val = (val << 8) + *data++;
3188 val = cpu_to_be32(val);
3190 ret = sf1_write(adapter, c, c != left, 1, val);
3194 ret = flash_wait_op(adapter, 8, 1);
3198 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3200 /* Read the page to verify the write succeeded */
3201 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3206 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3208 "failed to correctly write the flash page at %#x\n",
3215 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3220 * t4_get_fw_version - read the firmware version
3221 * @adapter: the adapter
3222 * @vers: where to place the version
3224 * Reads the FW version from flash.
3226 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3228 return t4_read_flash(adapter, FLASH_FW_START +
3229 offsetof(struct fw_hdr, fw_ver), 1,
3234 * t4_get_bs_version - read the firmware bootstrap version
3235 * @adapter: the adapter
3236 * @vers: where to place the version
3238 * Reads the FW Bootstrap version from flash.
3240 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3242 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3243 offsetof(struct fw_hdr, fw_ver), 1,
3248 * t4_get_tp_version - read the TP microcode version
3249 * @adapter: the adapter
3250 * @vers: where to place the version
3252 * Reads the TP microcode version from flash.
3254 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3256 return t4_read_flash(adapter, FLASH_FW_START +
3257 offsetof(struct fw_hdr, tp_microcode_ver),
3262 * t4_get_exprom_version - return the Expansion ROM version (if any)
3263 * @adapter: the adapter
3264 * @vers: where to place the version
3266 * Reads the Expansion ROM header from FLASH and returns the version
3267 * number (if present) through the @vers return value pointer. We return
3268 * this in the Firmware Version Format since it's convenient. Return
3269 * 0 on success, -ENOENT if no Expansion ROM is present.
3271 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3273 struct exprom_header {
3274 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3275 unsigned char hdr_ver[4]; /* Expansion ROM version */
3277 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3281 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3282 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3287 hdr = (struct exprom_header *)exprom_header_buf;
3288 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3291 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3292 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3293 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3294 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3299 * t4_get_scfg_version - return the Serial Configuration version
3300 * @adapter: the adapter
3301 * @vers: where to place the version
3303 * Reads the Serial Configuration Version via the Firmware interface
3304 * (thus this can only be called once we're ready to issue Firmware
3305 * commands). The format of the Serial Configuration version is
3306 * adapter specific. Returns 0 on success, an error on failure.
3308 * Note that early versions of the Firmware didn't include the ability
3309 * to retrieve the Serial Configuration version, so we zero-out the
3310 * return-value parameter in that case to avoid leaving it with
3313 * Also note that the Firmware will return its cached copy of the Serial
3314 * Initialization Revision ID, not the actual Revision ID as written in
3315 * the Serial EEPROM. This is only an issue if a new VPD has been written
3316 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3317 * it's best to defer calling this routine till after a FW_RESET_CMD has
3318 * been issued if the Host Driver will be performing a full adapter
3321 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3326 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3327 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3328 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3329 1, &scfgrev_param, vers);
3336 * t4_get_vpd_version - return the VPD version
3337 * @adapter: the adapter
3338 * @vers: where to place the version
3340 * Reads the VPD via the Firmware interface (thus this can only be called
3341 * once we're ready to issue Firmware commands). The format of the
3342 * VPD version is adapter specific. Returns 0 on success, an error on
3345 * Note that early versions of the Firmware didn't include the ability
3346 * to retrieve the VPD version, so we zero-out the return-value parameter
3347 * in that case to avoid leaving it with garbage in it.
3349 * Also note that the Firmware will return its cached copy of the VPD
3350 * Revision ID, not the actual Revision ID as written in the Serial
3351 * EEPROM. This is only an issue if a new VPD has been written and the
3352 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3353 * to defer calling this routine till after a FW_RESET_CMD has been issued
3354 * if the Host Driver will be performing a full adapter initialization.
3356 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3361 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3362 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3363 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3364 1, &vpdrev_param, vers);
3371 * t4_get_version_info - extract various chip/firmware version information
3372 * @adapter: the adapter
3374 * Reads various chip/firmware version numbers and stores them into the
3375 * adapter Adapter Parameters structure. If any of the efforts fails
3376 * the first failure will be returned, but all of the version numbers
3379 int t4_get_version_info(struct adapter *adapter)
3383 #define FIRST_RET(__getvinfo) \
3385 int __ret = __getvinfo; \
3386 if (__ret && !ret) \
3390 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3391 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3392 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3393 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3394 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3395 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3403 * t4_flash_erase_sectors - erase a range of flash sectors
3404 * @adapter: the adapter
3405 * @start: the first sector to erase
3406 * @end: the last sector to erase
3408 * Erases the sectors in the given inclusive range.
3410 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3414 if (end >= adapter->params.sf_nsec)
3417 while (start <= end) {
3418 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3419 (ret = sf1_write(adapter, 4, 0, 1,
3420 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3421 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3423 "erase of flash sector %d failed, error %d\n",
3429 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3434 * t4_flash_cfg_addr - return the address of the flash configuration file
3435 * @adapter: the adapter
3437 * Return the address within the flash where the Firmware Configuration
3438 * File is stored, or an error if the device FLASH is too small to contain
3439 * a Firmware Configuration File.
3441 int t4_flash_cfg_addr(struct adapter *adapter)
3444 * If the device FLASH isn't large enough to hold a Firmware
3445 * Configuration File, return an error.
3447 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3450 return FLASH_CFG_START;
3454 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3455 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3456 * and emit an error message for mismatched firmware to save our caller the
3459 static int t4_fw_matches_chip(struct adapter *adap,
3460 const struct fw_hdr *hdr)
3463 * The expression below will return FALSE for any unsupported adapter
3464 * which will keep us "honest" in the future ...
3466 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3467 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3468 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3472 "FW image (%d) is not suitable for this adapter (%d)\n",
3473 hdr->chip, chip_id(adap));
3478 * t4_load_fw - download firmware
3479 * @adap: the adapter
3480 * @fw_data: the firmware image to write
3483 * Write the supplied firmware image to the card's serial flash.
3485 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3490 u8 first_page[SF_PAGE_SIZE];
3491 const u32 *p = (const u32 *)fw_data;
3492 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3493 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3494 unsigned int fw_start_sec;
3495 unsigned int fw_start;
3496 unsigned int fw_size;
3498 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3499 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3500 fw_start = FLASH_FWBOOTSTRAP_START;
3501 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3503 fw_start_sec = FLASH_FW_START_SEC;
3504 fw_start = FLASH_FW_START;
3505 fw_size = FLASH_FW_MAX_SIZE;
3509 CH_ERR(adap, "FW image has no data\n");
3514 "FW image size not multiple of 512 bytes\n");
3517 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3519 "FW image size differs from size in FW header\n");
3522 if (size > fw_size) {
3523 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3527 if (!t4_fw_matches_chip(adap, hdr))
3530 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3531 csum += be32_to_cpu(p[i]);
3533 if (csum != 0xffffffff) {
3535 "corrupted firmware image, checksum %#x\n", csum);
3539 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3540 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3545 * We write the correct version at the end so the driver can see a bad
3546 * version if the FW write fails. Start by writing a copy of the
3547 * first page with a bad version.
3549 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3550 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3551 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3556 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3557 addr += SF_PAGE_SIZE;
3558 fw_data += SF_PAGE_SIZE;
3559 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3564 ret = t4_write_flash(adap,
3565 fw_start + offsetof(struct fw_hdr, fw_ver),
3566 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3569 CH_ERR(adap, "firmware download failed, error %d\n",
3575 * t4_fwcache - firmware cache operation
3576 * @adap: the adapter
3577 * @op : the operation (flush or flush and invalidate)
3579 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3581 struct fw_params_cmd c;
3583 memset(&c, 0, sizeof(c));
3585 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3586 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3587 V_FW_PARAMS_CMD_PFN(adap->pf) |
3588 V_FW_PARAMS_CMD_VFN(0));
3589 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3591 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3592 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3593 c.param[0].val = (__force __be32)op;
3595 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3598 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3599 unsigned int *pif_req_wrptr,
3600 unsigned int *pif_rsp_wrptr)
3603 u32 cfg, val, req, rsp;
3605 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3606 if (cfg & F_LADBGEN)
3607 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3609 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3610 req = G_POLADBGWRPTR(val);
3611 rsp = G_PILADBGWRPTR(val);
3613 *pif_req_wrptr = req;
3615 *pif_rsp_wrptr = rsp;
3617 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3618 for (j = 0; j < 6; j++) {
3619 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3620 V_PILADBGRDPTR(rsp));
3621 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3622 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3626 req = (req + 2) & M_POLADBGRDPTR;
3627 rsp = (rsp + 2) & M_PILADBGRDPTR;
3629 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3632 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3637 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3638 if (cfg & F_LADBGEN)
3639 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3641 for (i = 0; i < CIM_MALA_SIZE; i++) {
3642 for (j = 0; j < 5; j++) {
3644 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3645 V_PILADBGRDPTR(idx));
3646 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3647 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3650 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3653 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3657 for (i = 0; i < 8; i++) {
3658 u32 *p = la_buf + i;
3660 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3661 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3662 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3663 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3664 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3668 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3669 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
3670 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
3673 * t4_link_l1cfg - apply link configuration to MAC/PHY
3674 * @phy: the PHY to setup
3675 * @mac: the MAC to setup
3676 * @lc: the requested link configuration
3678 * Set up a port's MAC and PHY according to a desired link configuration.
3679 * - If the PHY can auto-negotiate first decide what to advertise, then
3680 * enable/disable auto-negotiation as desired, and reset.
3681 * - If the PHY does not auto-negotiate just reset it.
3682 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3683 * otherwise do it later based on the outcome of auto-negotiation.
3685 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3686 struct link_config *lc)
3688 struct fw_port_cmd c;
3689 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3692 if (lc->requested_fc & PAUSE_RX)
3693 fc |= FW_PORT_CAP_FC_RX;
3694 if (lc->requested_fc & PAUSE_TX)
3695 fc |= FW_PORT_CAP_FC_TX;
3697 memset(&c, 0, sizeof(c));
3698 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3699 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3700 V_FW_PORT_CMD_PORTID(port));
3702 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3705 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3706 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3708 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3709 } else if (lc->autoneg == AUTONEG_DISABLE) {
3710 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
3711 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3713 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
3715 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3719 * t4_restart_aneg - restart autonegotiation
3720 * @adap: the adapter
3721 * @mbox: mbox to use for the FW command
3722 * @port: the port id
3724 * Restarts autonegotiation for the selected port.
3726 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3728 struct fw_port_cmd c;
3730 memset(&c, 0, sizeof(c));
3731 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3732 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3733 V_FW_PORT_CMD_PORTID(port));
3735 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3737 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3738 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3741 typedef void (*int_handler_t)(struct adapter *adap);
3744 unsigned int mask; /* bits to check in interrupt status */
3745 const char *msg; /* message to print or NULL */
3746 short stat_idx; /* stat counter to increment or -1 */
3747 unsigned short fatal; /* whether the condition reported is fatal */
3748 int_handler_t int_handler; /* platform-specific int handler */
3752 * t4_handle_intr_status - table driven interrupt handler
3753 * @adapter: the adapter that generated the interrupt
3754 * @reg: the interrupt status register to process
3755 * @acts: table of interrupt actions
3757 * A table driven interrupt handler that applies a set of masks to an
3758 * interrupt status word and performs the corresponding actions if the
3759 * interrupts described by the mask have occurred. The actions include
3760 * optionally emitting a warning or alert message. The table is terminated
3761 * by an entry specifying mask 0. Returns the number of fatal interrupt
3764 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3765 const struct intr_info *acts)
3768 unsigned int mask = 0;
3769 unsigned int status = t4_read_reg(adapter, reg);
3771 for ( ; acts->mask; ++acts) {
3772 if (!(status & acts->mask))
3776 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3777 status & acts->mask);
3778 } else if (acts->msg)
3779 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3780 status & acts->mask);
3781 if (acts->int_handler)
3782 acts->int_handler(adapter);
3786 if (status) /* clear processed interrupts */
3787 t4_write_reg(adapter, reg, status);
3792 * Interrupt handler for the PCIE module.
3794 static void pcie_intr_handler(struct adapter *adapter)
3796 static const struct intr_info sysbus_intr_info[] = {
3797 { F_RNPP, "RXNP array parity error", -1, 1 },
3798 { F_RPCP, "RXPC array parity error", -1, 1 },
3799 { F_RCIP, "RXCIF array parity error", -1, 1 },
3800 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3801 { F_RFTP, "RXFT array parity error", -1, 1 },
3804 static const struct intr_info pcie_port_intr_info[] = {
3805 { F_TPCP, "TXPC array parity error", -1, 1 },
3806 { F_TNPP, "TXNP array parity error", -1, 1 },
3807 { F_TFTP, "TXFT array parity error", -1, 1 },
3808 { F_TCAP, "TXCA array parity error", -1, 1 },
3809 { F_TCIP, "TXCIF array parity error", -1, 1 },
3810 { F_RCAP, "RXCA array parity error", -1, 1 },
3811 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3812 { F_RDPE, "Rx data parity error", -1, 1 },
3813 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
3816 static const struct intr_info pcie_intr_info[] = {
3817 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3818 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3819 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3820 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3821 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3822 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3823 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3824 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3825 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3826 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3827 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3828 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3829 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3830 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3831 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3832 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3833 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3834 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3835 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3836 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3837 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3838 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3839 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3840 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3841 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3842 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3843 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3844 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3845 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3846 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3851 static const struct intr_info t5_pcie_intr_info[] = {
3852 { F_MSTGRPPERR, "Master Response Read Queue parity error",
3854 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3855 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3856 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3857 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3858 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3859 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3860 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3862 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3864 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3865 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3866 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3867 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3868 { F_DREQWRPERR, "PCI DMA channel write request parity error",
3870 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3871 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3872 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3873 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3874 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3875 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3876 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3877 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3878 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3879 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3880 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3882 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3884 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3885 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3886 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3887 { F_READRSPERR, "Outbound read error", -1,
3895 fat = t4_handle_intr_status(adapter,
3896 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3898 t4_handle_intr_status(adapter,
3899 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3900 pcie_port_intr_info) +
3901 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3904 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3907 t4_fatal_err(adapter);
3911 * TP interrupt handler.
3913 static void tp_intr_handler(struct adapter *adapter)
3915 static const struct intr_info tp_intr_info[] = {
3916 { 0x3fffffff, "TP parity error", -1, 1 },
3917 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3921 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3922 t4_fatal_err(adapter);
3926 * SGE interrupt handler.
3928 static void sge_intr_handler(struct adapter *adapter)
3933 static const struct intr_info sge_intr_info[] = {
3934 { F_ERR_CPL_EXCEED_IQE_SIZE,
3935 "SGE received CPL exceeding IQE size", -1, 1 },
3936 { F_ERR_INVALID_CIDX_INC,
3937 "SGE GTS CIDX increment too large", -1, 0 },
3938 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3939 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3940 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3941 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3942 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3944 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3946 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3948 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3950 { F_ERR_ING_CTXT_PRIO,
3951 "SGE too many priority ingress contexts", -1, 0 },
3952 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3953 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3957 static const struct intr_info t4t5_sge_intr_info[] = {
3958 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3959 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3960 { F_ERR_EGR_CTXT_PRIO,
3961 "SGE too many priority egress contexts", -1, 0 },
3966 * For now, treat below interrupts as fatal so that we disable SGE and
3967 * get better debug */
3968 static const struct intr_info t6_sge_intr_info[] = {
3969 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
3970 "SGE PCIe error for a DBP thread", -1, 1 },
3972 "SGE Actual WRE packet is less than advertized length",
3977 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
3978 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
3980 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
3981 (unsigned long long)v);
3982 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
3983 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
3986 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
3987 if (chip_id(adapter) <= CHELSIO_T5)
3988 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3989 t4t5_sge_intr_info);
3991 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
3994 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
3995 if (err & F_ERROR_QID_VALID) {
3996 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
3997 if (err & F_UNCAPTURED_ERROR)
3998 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
3999 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4000 F_UNCAPTURED_ERROR);
4004 t4_fatal_err(adapter);
4007 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4008 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4009 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4010 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4013 * CIM interrupt handler.
4015 static void cim_intr_handler(struct adapter *adapter)
4017 static const struct intr_info cim_intr_info[] = {
4018 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4019 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4020 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4021 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4022 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4023 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4024 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4027 static const struct intr_info cim_upintr_info[] = {
4028 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4029 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4030 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4031 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4032 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4033 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4034 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4035 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4036 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4037 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4038 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4039 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4040 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4041 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4042 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4043 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4044 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4045 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4046 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4047 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4048 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4049 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4050 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4051 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4052 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4053 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4054 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4055 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4060 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
4061 t4_report_fw_error(adapter);
4063 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4065 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4068 t4_fatal_err(adapter);
4072 * ULP RX interrupt handler.
4074 static void ulprx_intr_handler(struct adapter *adapter)
4076 static const struct intr_info ulprx_intr_info[] = {
4077 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4078 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4079 { 0x7fffff, "ULPRX parity error", -1, 1 },
4083 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4084 t4_fatal_err(adapter);
4088 * ULP TX interrupt handler.
4090 static void ulptx_intr_handler(struct adapter *adapter)
4092 static const struct intr_info ulptx_intr_info[] = {
4093 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4095 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4097 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4099 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4101 { 0xfffffff, "ULPTX parity error", -1, 1 },
4105 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4106 t4_fatal_err(adapter);
4110 * PM TX interrupt handler.
4112 static void pmtx_intr_handler(struct adapter *adapter)
4114 static const struct intr_info pmtx_intr_info[] = {
4115 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4116 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4117 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4118 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4119 { 0xffffff0, "PMTX framing error", -1, 1 },
4120 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4121 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4123 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4124 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4128 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4129 t4_fatal_err(adapter);
4133 * PM RX interrupt handler.
4135 static void pmrx_intr_handler(struct adapter *adapter)
4137 static const struct intr_info pmrx_intr_info[] = {
4138 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4139 { 0x3ffff0, "PMRX framing error", -1, 1 },
4140 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4141 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4143 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4144 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4148 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4149 t4_fatal_err(adapter);
4153 * CPL switch interrupt handler.
4155 static void cplsw_intr_handler(struct adapter *adapter)
4157 static const struct intr_info cplsw_intr_info[] = {
4158 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4159 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4160 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4161 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4162 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4163 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4167 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4168 t4_fatal_err(adapter);
4172 * LE interrupt handler.
4174 static void le_intr_handler(struct adapter *adap)
4176 unsigned int chip_ver = chip_id(adap);
4177 static const struct intr_info le_intr_info[] = {
4178 { F_LIPMISS, "LE LIP miss", -1, 0 },
4179 { F_LIP0, "LE 0 LIP error", -1, 0 },
4180 { F_PARITYERR, "LE parity error", -1, 1 },
4181 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4182 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4186 static const struct intr_info t6_le_intr_info[] = {
4187 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4188 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4189 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4190 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4191 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4195 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4196 (chip_ver <= CHELSIO_T5) ?
4197 le_intr_info : t6_le_intr_info))
4202 * MPS interrupt handler.
4204 static void mps_intr_handler(struct adapter *adapter)
4206 static const struct intr_info mps_rx_intr_info[] = {
4207 { 0xffffff, "MPS Rx parity error", -1, 1 },
4210 static const struct intr_info mps_tx_intr_info[] = {
4211 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4212 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4213 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4215 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4217 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4218 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4219 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4222 static const struct intr_info mps_trc_intr_info[] = {
4223 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4224 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4226 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4229 static const struct intr_info mps_stat_sram_intr_info[] = {
4230 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4233 static const struct intr_info mps_stat_tx_intr_info[] = {
4234 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4237 static const struct intr_info mps_stat_rx_intr_info[] = {
4238 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4241 static const struct intr_info mps_cls_intr_info[] = {
4242 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4243 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4244 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4250 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4252 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4254 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4255 mps_trc_intr_info) +
4256 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4257 mps_stat_sram_intr_info) +
4258 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4259 mps_stat_tx_intr_info) +
4260 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4261 mps_stat_rx_intr_info) +
4262 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4265 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4266 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4268 t4_fatal_err(adapter);
4271 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4275 * EDC/MC interrupt handler.
4277 static void mem_intr_handler(struct adapter *adapter, int idx)
4279 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4281 unsigned int addr, cnt_addr, v;
4283 if (idx <= MEM_EDC1) {
4284 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4285 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4286 } else if (idx == MEM_MC) {
4287 if (is_t4(adapter)) {
4288 addr = A_MC_INT_CAUSE;
4289 cnt_addr = A_MC_ECC_STATUS;
4291 addr = A_MC_P_INT_CAUSE;
4292 cnt_addr = A_MC_P_ECC_STATUS;
4295 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4296 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4299 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4300 if (v & F_PERR_INT_CAUSE)
4301 CH_ALERT(adapter, "%s FIFO parity error\n",
4303 if (v & F_ECC_CE_INT_CAUSE) {
4304 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4306 t4_edc_err_read(adapter, idx);
4308 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4309 CH_WARN_RATELIMIT(adapter,
4310 "%u %s correctable ECC data error%s\n",
4311 cnt, name[idx], cnt > 1 ? "s" : "");
4313 if (v & F_ECC_UE_INT_CAUSE)
4315 "%s uncorrectable ECC data error\n", name[idx]);
4317 t4_write_reg(adapter, addr, v);
4318 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4319 t4_fatal_err(adapter);
4323 * MA interrupt handler.
4325 static void ma_intr_handler(struct adapter *adapter)
4327 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4329 if (status & F_MEM_PERR_INT_CAUSE) {
4331 "MA parity error, parity status %#x\n",
4332 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4335 "MA parity error, parity status %#x\n",
4336 t4_read_reg(adapter,
4337 A_MA_PARITY_ERROR_STATUS2));
4339 if (status & F_MEM_WRAP_INT_CAUSE) {
4340 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4341 CH_ALERT(adapter, "MA address wrap-around error by "
4342 "client %u to address %#x\n",
4343 G_MEM_WRAP_CLIENT_NUM(v),
4344 G_MEM_WRAP_ADDRESS(v) << 4);
4346 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4347 t4_fatal_err(adapter);
4351 * SMB interrupt handler.
4353 static void smb_intr_handler(struct adapter *adap)
4355 static const struct intr_info smb_intr_info[] = {
4356 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4357 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4358 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4362 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4367 * NC-SI interrupt handler.
4369 static void ncsi_intr_handler(struct adapter *adap)
4371 static const struct intr_info ncsi_intr_info[] = {
4372 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4373 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4374 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4375 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4379 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4384 * XGMAC interrupt handler.
4386 static void xgmac_intr_handler(struct adapter *adap, int port)
4388 u32 v, int_cause_reg;
4391 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4393 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4395 v = t4_read_reg(adap, int_cause_reg);
4397 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4401 if (v & F_TXFIFO_PRTY_ERR)
4402 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4404 if (v & F_RXFIFO_PRTY_ERR)
4405 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4407 t4_write_reg(adap, int_cause_reg, v);
4412 * PL interrupt handler.
4414 static void pl_intr_handler(struct adapter *adap)
4416 static const struct intr_info pl_intr_info[] = {
4417 { F_FATALPERR, "Fatal parity error", -1, 1 },
4418 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4422 static const struct intr_info t5_pl_intr_info[] = {
4423 { F_FATALPERR, "Fatal parity error", -1, 1 },
4427 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4429 pl_intr_info : t5_pl_intr_info))
4433 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4436 * t4_slow_intr_handler - control path interrupt handler
4437 * @adapter: the adapter
4439 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4440 * The designation 'slow' is because it involves register reads, while
4441 * data interrupts typically don't involve any MMIOs.
4443 int t4_slow_intr_handler(struct adapter *adapter)
4445 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4447 if (!(cause & GLBL_INTR_MASK))
4450 cim_intr_handler(adapter);
4452 mps_intr_handler(adapter);
4454 ncsi_intr_handler(adapter);
4456 pl_intr_handler(adapter);
4458 smb_intr_handler(adapter);
4460 xgmac_intr_handler(adapter, 0);
4462 xgmac_intr_handler(adapter, 1);
4464 xgmac_intr_handler(adapter, 2);
4466 xgmac_intr_handler(adapter, 3);
4468 pcie_intr_handler(adapter);
4470 mem_intr_handler(adapter, MEM_MC);
4471 if (is_t5(adapter) && (cause & F_MC1))
4472 mem_intr_handler(adapter, MEM_MC1);
4474 mem_intr_handler(adapter, MEM_EDC0);
4476 mem_intr_handler(adapter, MEM_EDC1);
4478 le_intr_handler(adapter);
4480 tp_intr_handler(adapter);
4482 ma_intr_handler(adapter);
4483 if (cause & F_PM_TX)
4484 pmtx_intr_handler(adapter);
4485 if (cause & F_PM_RX)
4486 pmrx_intr_handler(adapter);
4487 if (cause & F_ULP_RX)
4488 ulprx_intr_handler(adapter);
4489 if (cause & F_CPL_SWITCH)
4490 cplsw_intr_handler(adapter);
4492 sge_intr_handler(adapter);
4493 if (cause & F_ULP_TX)
4494 ulptx_intr_handler(adapter);
4496 /* Clear the interrupts just processed for which we are the master. */
4497 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4498 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4503 * t4_intr_enable - enable interrupts
4504 * @adapter: the adapter whose interrupts should be enabled
4506 * Enable PF-specific interrupts for the calling function and the top-level
4507 * interrupt concentrator for global interrupts. Interrupts are already
4508 * enabled at each module, here we just enable the roots of the interrupt
4511 * Note: this function should be called only when the driver manages
4512 * non PF-specific interrupts from the various HW modules. Only one PCI
4513 * function at a time should be doing this.
4515 void t4_intr_enable(struct adapter *adapter)
4518 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4519 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4520 ? G_SOURCEPF(whoami)
4521 : G_T6_SOURCEPF(whoami));
4523 if (chip_id(adapter) <= CHELSIO_T5)
4524 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4526 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4527 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4528 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4529 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4530 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4531 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4532 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4533 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4534 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4535 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4539 * t4_intr_disable - disable interrupts
4540 * @adapter: the adapter whose interrupts should be disabled
4542 * Disable interrupts. We only disable the top-level interrupt
4543 * concentrators. The caller must be a PCI function managing global
4546 void t4_intr_disable(struct adapter *adapter)
4548 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4549 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4550 ? G_SOURCEPF(whoami)
4551 : G_T6_SOURCEPF(whoami));
4553 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4554 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4558 * t4_intr_clear - clear all interrupts
4559 * @adapter: the adapter whose interrupts should be cleared
4561 * Clears all interrupts. The caller must be a PCI function managing
4562 * global interrupts.
4564 void t4_intr_clear(struct adapter *adapter)
4566 static const unsigned int cause_reg[] = {
4567 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4568 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4569 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4570 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4571 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4572 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4574 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4575 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4576 A_MPS_RX_PERR_INT_CAUSE,
4578 MYPF_REG(A_PL_PF_INT_CAUSE),
4585 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4586 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4588 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4589 A_MC_P_INT_CAUSE, 0xffffffff);
4591 if (is_t4(adapter)) {
4592 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4594 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4597 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4599 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4600 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4604 * hash_mac_addr - return the hash value of a MAC address
4605 * @addr: the 48-bit Ethernet MAC address
4607 * Hashes a MAC address according to the hash function used by HW inexact
4608 * (hash) address matching.
4610 static int hash_mac_addr(const u8 *addr)
4612 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4613 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4621 * t4_config_rss_range - configure a portion of the RSS mapping table
4622 * @adapter: the adapter
4623 * @mbox: mbox to use for the FW command
4624 * @viid: virtual interface whose RSS subtable is to be written
4625 * @start: start entry in the table to write
4626 * @n: how many table entries to write
4627 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4628 * @nrspq: number of values in @rspq
4630 * Programs the selected part of the VI's RSS mapping table with the
4631 * provided values. If @nrspq < @n the supplied values are used repeatedly
4632 * until the full table range is populated.
4634 * The caller must ensure the values in @rspq are in the range allowed for
4637 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4638 int start, int n, const u16 *rspq, unsigned int nrspq)
4641 const u16 *rsp = rspq;
4642 const u16 *rsp_end = rspq + nrspq;
4643 struct fw_rss_ind_tbl_cmd cmd;
4645 memset(&cmd, 0, sizeof(cmd));
4646 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4647 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4648 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4649 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4652 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4653 * Queue Identifiers. These Ingress Queue IDs are packed three to
4654 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4658 int nq = min(n, 32);
4660 __be32 *qp = &cmd.iq0_to_iq2;
4663 * Set up the firmware RSS command header to send the next
4664 * "nq" Ingress Queue IDs to the firmware.
4666 cmd.niqid = cpu_to_be16(nq);
4667 cmd.startidx = cpu_to_be16(start);
4670 * "nq" more done for the start of the next loop.
4676 * While there are still Ingress Queue IDs to stuff into the
4677 * current firmware RSS command, retrieve them from the
4678 * Ingress Queue ID array and insert them into the command.
4682 * Grab up to the next 3 Ingress Queue IDs (wrapping
4683 * around the Ingress Queue ID array if necessary) and
4684 * insert them into the firmware RSS command at the
4685 * current 3-tuple position within the commad.
4689 int nqbuf = min(3, nq);
4692 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4693 while (nqbuf && nq_packed < 32) {
4700 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4701 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4702 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4706 * Send this portion of the RRS table update to the firmware;
4707 * bail out on any errors.
4709 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4717 * t4_config_glbl_rss - configure the global RSS mode
4718 * @adapter: the adapter
4719 * @mbox: mbox to use for the FW command
4720 * @mode: global RSS mode
4721 * @flags: mode-specific flags
4723 * Sets the global RSS mode.
4725 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4728 struct fw_rss_glb_config_cmd c;
4730 memset(&c, 0, sizeof(c));
4731 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4732 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4733 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4734 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4735 c.u.manual.mode_pkd =
4736 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4737 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4738 c.u.basicvirtual.mode_pkd =
4739 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4740 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4743 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4747 * t4_config_vi_rss - configure per VI RSS settings
4748 * @adapter: the adapter
4749 * @mbox: mbox to use for the FW command
4752 * @defq: id of the default RSS queue for the VI.
4754 * Configures VI-specific RSS properties.
4756 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4757 unsigned int flags, unsigned int defq)
4759 struct fw_rss_vi_config_cmd c;
4761 memset(&c, 0, sizeof(c));
4762 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4763 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4764 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4765 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4766 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4767 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4768 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4771 /* Read an RSS table row */
4772 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4774 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4775 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4780 * t4_read_rss - read the contents of the RSS mapping table
4781 * @adapter: the adapter
4782 * @map: holds the contents of the RSS mapping table
4784 * Reads the contents of the RSS hash->queue mapping table.
4786 int t4_read_rss(struct adapter *adapter, u16 *map)
4791 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4792 ret = rd_rss_row(adapter, i, &val);
4795 *map++ = G_LKPTBLQUEUE0(val);
4796 *map++ = G_LKPTBLQUEUE1(val);
4802 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4803 * @adap: the adapter
4804 * @vals: where the indirect register values are stored/written
4805 * @nregs: how many indirect registers to read/write
4806 * @start_idx: index of first indirect register to read/write
4807 * @rw: Read (1) or Write (0)
4809 * Access TP PIO registers through LDST
4811 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4812 unsigned int start_index, unsigned int rw)
4815 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4816 struct fw_ldst_cmd c;
4818 for (i = 0 ; i < nregs; i++) {
4819 memset(&c, 0, sizeof(c));
4820 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4822 (rw ? F_FW_CMD_READ :
4824 V_FW_LDST_CMD_ADDRSPACE(cmd));
4825 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4827 c.u.addrval.addr = cpu_to_be32(start_index + i);
4828 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4829 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4832 vals[i] = be32_to_cpu(c.u.addrval.val);
4838 * t4_read_rss_key - read the global RSS key
4839 * @adap: the adapter
4840 * @key: 10-entry array holding the 320-bit RSS key
4842 * Reads the global 320-bit RSS key.
4844 void t4_read_rss_key(struct adapter *adap, u32 *key)
4846 if (t4_use_ldst(adap))
4847 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
4849 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4850 A_TP_RSS_SECRET_KEY0);
4854 * t4_write_rss_key - program one of the RSS keys
4855 * @adap: the adapter
4856 * @key: 10-entry array holding the 320-bit RSS key
4857 * @idx: which RSS key to write
4859 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4860 * 0..15 the corresponding entry in the RSS key table is written,
4861 * otherwise the global RSS key is written.
4863 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
4865 u8 rss_key_addr_cnt = 16;
4866 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
4869 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4870 * allows access to key addresses 16-63 by using KeyWrAddrX
4871 * as index[5:4](upper 2) into key table
4873 if ((chip_id(adap) > CHELSIO_T5) &&
4874 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
4875 rss_key_addr_cnt = 32;
4877 if (t4_use_ldst(adap))
4878 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
4880 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
4881 A_TP_RSS_SECRET_KEY0);
4883 if (idx >= 0 && idx < rss_key_addr_cnt) {
4884 if (rss_key_addr_cnt > 16)
4885 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4886 V_KEYWRADDRX(idx >> 4) |
4887 V_T6_VFWRADDR(idx) | F_KEYWREN);
4889 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
4890 V_KEYWRADDR(idx) | F_KEYWREN);
4895 * t4_read_rss_pf_config - read PF RSS Configuration Table
4896 * @adapter: the adapter
4897 * @index: the entry in the PF RSS table to read
4898 * @valp: where to store the returned value
4900 * Reads the PF RSS Configuration Table at the specified index and returns
4901 * the value found there.
4903 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4906 if (t4_use_ldst(adapter))
4907 t4_fw_tp_pio_rw(adapter, valp, 1,
4908 A_TP_RSS_PF0_CONFIG + index, 1);
4910 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4911 valp, 1, A_TP_RSS_PF0_CONFIG + index);
4915 * t4_write_rss_pf_config - write PF RSS Configuration Table
4916 * @adapter: the adapter
4917 * @index: the entry in the VF RSS table to read
4918 * @val: the value to store
4920 * Writes the PF RSS Configuration Table at the specified index with the
4923 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
4926 if (t4_use_ldst(adapter))
4927 t4_fw_tp_pio_rw(adapter, &val, 1,
4928 A_TP_RSS_PF0_CONFIG + index, 0);
4930 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4931 &val, 1, A_TP_RSS_PF0_CONFIG + index);
4935 * t4_read_rss_vf_config - read VF RSS Configuration Table
4936 * @adapter: the adapter
4937 * @index: the entry in the VF RSS table to read
4938 * @vfl: where to store the returned VFL
4939 * @vfh: where to store the returned VFH
4941 * Reads the VF RSS Configuration Table at the specified index and returns
4942 * the (VFL, VFH) values found there.
4944 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4947 u32 vrt, mask, data;
4949 if (chip_id(adapter) <= CHELSIO_T5) {
4950 mask = V_VFWRADDR(M_VFWRADDR);
4951 data = V_VFWRADDR(index);
4953 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4954 data = V_T6_VFWRADDR(index);
4957 * Request that the index'th VF Table values be read into VFL/VFH.
4959 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
4960 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
4961 vrt |= data | F_VFRDEN;
4962 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
4965 * Grab the VFL/VFH values ...
4967 if (t4_use_ldst(adapter)) {
4968 t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
4969 t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
4971 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4972 vfl, 1, A_TP_RSS_VFL_CONFIG);
4973 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4974 vfh, 1, A_TP_RSS_VFH_CONFIG);
4979 * t4_write_rss_vf_config - write VF RSS Configuration Table
4981 * @adapter: the adapter
4982 * @index: the entry in the VF RSS table to write
4983 * @vfl: the VFL to store
4984 * @vfh: the VFH to store
4986 * Writes the VF RSS Configuration Table at the specified index with the
4987 * specified (VFL, VFH) values.
4989 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
4992 u32 vrt, mask, data;
4994 if (chip_id(adapter) <= CHELSIO_T5) {
4995 mask = V_VFWRADDR(M_VFWRADDR);
4996 data = V_VFWRADDR(index);
4998 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
4999 data = V_T6_VFWRADDR(index);
5003 * Load up VFL/VFH with the values to be written ...
5005 if (t4_use_ldst(adapter)) {
5006 t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
5007 t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
5009 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5010 &vfl, 1, A_TP_RSS_VFL_CONFIG);
5011 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5012 &vfh, 1, A_TP_RSS_VFH_CONFIG);
5016 * Write the VFL/VFH into the VF Table at index'th location.
5018 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5019 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5020 vrt |= data | F_VFRDEN;
5021 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5025 * t4_read_rss_pf_map - read PF RSS Map
5026 * @adapter: the adapter
5028 * Reads the PF RSS Map register and returns its value.
5030 u32 t4_read_rss_pf_map(struct adapter *adapter)
5034 if (t4_use_ldst(adapter))
5035 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
5037 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5038 &pfmap, 1, A_TP_RSS_PF_MAP);
5043 * t4_write_rss_pf_map - write PF RSS Map
5044 * @adapter: the adapter
5045 * @pfmap: PF RSS Map value
5047 * Writes the specified value to the PF RSS Map register.
5049 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
5051 if (t4_use_ldst(adapter))
5052 t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
5054 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5055 &pfmap, 1, A_TP_RSS_PF_MAP);
5059 * t4_read_rss_pf_mask - read PF RSS Mask
5060 * @adapter: the adapter
5062 * Reads the PF RSS Mask register and returns its value.
5064 u32 t4_read_rss_pf_mask(struct adapter *adapter)
5068 if (t4_use_ldst(adapter))
5069 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
5071 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5072 &pfmask, 1, A_TP_RSS_PF_MSK);
5077 * t4_write_rss_pf_mask - write PF RSS Mask
5078 * @adapter: the adapter
5079 * @pfmask: PF RSS Mask value
5081 * Writes the specified value to the PF RSS Mask register.
5083 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
5085 if (t4_use_ldst(adapter))
5086 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
5088 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5089 &pfmask, 1, A_TP_RSS_PF_MSK);
5093 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5094 * @adap: the adapter
5095 * @v4: holds the TCP/IP counter values
5096 * @v6: holds the TCP/IPv6 counter values
5098 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5099 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5101 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5102 struct tp_tcp_stats *v6)
5104 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5106 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5107 #define STAT(x) val[STAT_IDX(x)]
5108 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5111 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5112 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
5113 v4->tcp_out_rsts = STAT(OUT_RST);
5114 v4->tcp_in_segs = STAT64(IN_SEG);
5115 v4->tcp_out_segs = STAT64(OUT_SEG);
5116 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5119 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5120 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
5121 v6->tcp_out_rsts = STAT(OUT_RST);
5122 v6->tcp_in_segs = STAT64(IN_SEG);
5123 v6->tcp_out_segs = STAT64(OUT_SEG);
5124 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5132 * t4_tp_get_err_stats - read TP's error MIB counters
5133 * @adap: the adapter
5134 * @st: holds the counter values
5136 * Returns the values of TP's error counters.
5138 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
5140 int nchan = adap->chip_params->nchan;
5142 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5143 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
5144 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5145 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
5146 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5147 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
5148 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5149 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
5150 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5151 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
5152 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5153 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
5154 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5155 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
5156 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5157 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
5159 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
5160 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
5164 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5165 * @adap: the adapter
5166 * @st: holds the counter values
5168 * Returns the values of TP's proxy counters.
5170 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
5172 int nchan = adap->chip_params->nchan;
5174 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
5175 nchan, A_TP_MIB_TNL_LPBK_0);
5179 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5180 * @adap: the adapter
5181 * @st: holds the counter values
5183 * Returns the values of TP's CPL counters.
5185 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
5187 int nchan = adap->chip_params->nchan;
5189 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
5190 nchan, A_TP_MIB_CPL_IN_REQ_0);
5191 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
5192 nchan, A_TP_MIB_CPL_OUT_RSP_0);
5196 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5197 * @adap: the adapter
5198 * @st: holds the counter values
5200 * Returns the values of TP's RDMA counters.
5202 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5204 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
5205 2, A_TP_MIB_RQE_DFR_PKT);
5209 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5210 * @adap: the adapter
5211 * @idx: the port index
5212 * @st: holds the counter values
5214 * Returns the values of TP's FCoE counters for the selected port.
5216 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5217 struct tp_fcoe_stats *st)
5221 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
5222 1, A_TP_MIB_FCOE_DDP_0 + idx);
5223 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
5224 1, A_TP_MIB_FCOE_DROP_0 + idx);
5225 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
5226 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
5227 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5231 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5232 * @adap: the adapter
5233 * @st: holds the counter values
5235 * Returns the values of TP's counters for non-TCP directly-placed packets.
5237 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5241 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5243 st->frames = val[0];
5245 st->octets = ((u64)val[2] << 32) | val[3];
5249 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5250 * @adap: the adapter
5251 * @mtus: where to store the MTU values
5252 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5254 * Reads the HW path MTU table.
5256 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5261 for (i = 0; i < NMTUS; ++i) {
5262 t4_write_reg(adap, A_TP_MTU_TABLE,
5263 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5264 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5265 mtus[i] = G_MTUVALUE(v);
5267 mtu_log[i] = G_MTUWIDTH(v);
5272 * t4_read_cong_tbl - reads the congestion control table
5273 * @adap: the adapter
5274 * @incr: where to store the alpha values
5276 * Reads the additive increments programmed into the HW congestion
5279 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5281 unsigned int mtu, w;
5283 for (mtu = 0; mtu < NMTUS; ++mtu)
5284 for (w = 0; w < NCCTRL_WIN; ++w) {
5285 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5286 V_ROWINDEX(0xffff) | (mtu << 5) | w);
5287 incr[mtu][w] = (u16)t4_read_reg(adap,
5288 A_TP_CCTRL_TABLE) & 0x1fff;
5293 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5294 * @adap: the adapter
5295 * @addr: the indirect TP register address
5296 * @mask: specifies the field within the register to modify
5297 * @val: new value for the field
5299 * Sets a field of an indirect TP register to the given value.
5301 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5302 unsigned int mask, unsigned int val)
5304 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5305 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5306 t4_write_reg(adap, A_TP_PIO_DATA, val);
5310 * init_cong_ctrl - initialize congestion control parameters
5311 * @a: the alpha values for congestion control
5312 * @b: the beta values for congestion control
5314 * Initialize the congestion control parameters.
5316 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5318 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5343 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5346 b[13] = b[14] = b[15] = b[16] = 3;
5347 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5348 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5353 /* The minimum additive increment value for the congestion control table */
5354 #define CC_MIN_INCR 2U
5357 * t4_load_mtus - write the MTU and congestion control HW tables
5358 * @adap: the adapter
5359 * @mtus: the values for the MTU table
5360 * @alpha: the values for the congestion control alpha parameter
5361 * @beta: the values for the congestion control beta parameter
5363 * Write the HW MTU table with the supplied MTUs and the high-speed
5364 * congestion control table with the supplied alpha, beta, and MTUs.
5365 * We write the two tables together because the additive increments
5366 * depend on the MTUs.
5368 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5369 const unsigned short *alpha, const unsigned short *beta)
5371 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5372 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5373 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5374 28672, 40960, 57344, 81920, 114688, 163840, 229376
5379 for (i = 0; i < NMTUS; ++i) {
5380 unsigned int mtu = mtus[i];
5381 unsigned int log2 = fls(mtu);
5383 if (!(mtu & ((1 << log2) >> 2))) /* round */
5385 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5386 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5388 for (w = 0; w < NCCTRL_WIN; ++w) {
5391 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5394 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5395 (w << 16) | (beta[w] << 13) | inc);
5401 * t4_set_pace_tbl - set the pace table
5402 * @adap: the adapter
5403 * @pace_vals: the pace values in microseconds
5404 * @start: index of the first entry in the HW pace table to set
5405 * @n: how many entries to set
5407 * Sets (a subset of the) HW pace table.
5409 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5410 unsigned int start, unsigned int n)
5412 unsigned int vals[NTX_SCHED], i;
5413 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5418 /* convert values from us to dack ticks, rounding to closest value */
5419 for (i = 0; i < n; i++, pace_vals++) {
5420 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5421 if (vals[i] > 0x7ff)
5423 if (*pace_vals && vals[i] == 0)
5426 for (i = 0; i < n; i++, start++)
5427 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5432 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5433 * @adap: the adapter
5434 * @kbps: target rate in Kbps
5435 * @sched: the scheduler index
5437 * Configure a Tx HW scheduler for the target rate.
5439 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5441 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5442 unsigned int clk = adap->params.vpd.cclk * 1000;
5443 unsigned int selected_cpt = 0, selected_bpt = 0;
5446 kbps *= 125; /* -> bytes */
5447 for (cpt = 1; cpt <= 255; cpt++) {
5449 bpt = (kbps + tps / 2) / tps;
5450 if (bpt > 0 && bpt <= 255) {
5452 delta = v >= kbps ? v - kbps : kbps - v;
5453 if (delta < mindelta) {
5458 } else if (selected_cpt)
5464 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5465 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5466 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5468 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5470 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5471 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5476 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5477 * @adap: the adapter
5478 * @sched: the scheduler index
5479 * @ipg: the interpacket delay in tenths of nanoseconds
5481 * Set the interpacket delay for a HW packet rate scheduler.
5483 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5485 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5487 /* convert ipg to nearest number of core clocks */
5488 ipg *= core_ticks_per_usec(adap);
5489 ipg = (ipg + 5000) / 10000;
5490 if (ipg > M_TXTIMERSEPQ0)
5493 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5494 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5496 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5498 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5499 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5500 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5505 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5506 * clocks. The formula is
5508 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5510 * which is equivalent to
5512 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5514 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5516 u64 v = bytes256 * adap->params.vpd.cclk;
5518 return v * 62 + v / 2;
5522 * t4_get_chan_txrate - get the current per channel Tx rates
5523 * @adap: the adapter
5524 * @nic_rate: rates for NIC traffic
5525 * @ofld_rate: rates for offloaded traffic
5527 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5530 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5534 v = t4_read_reg(adap, A_TP_TX_TRATE);
5535 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5536 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5537 if (adap->chip_params->nchan > 2) {
5538 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5539 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5542 v = t4_read_reg(adap, A_TP_TX_ORATE);
5543 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5544 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5545 if (adap->chip_params->nchan > 2) {
5546 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5547 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5552 * t4_set_trace_filter - configure one of the tracing filters
5553 * @adap: the adapter
5554 * @tp: the desired trace filter parameters
5555 * @idx: which filter to configure
5556 * @enable: whether to enable or disable the filter
5558 * Configures one of the tracing filters available in HW. If @tp is %NULL
5559 * it indicates that the filter is already written in the register and it
5560 * just needs to be enabled or disabled.
5562 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5563 int idx, int enable)
5565 int i, ofst = idx * 4;
5566 u32 data_reg, mask_reg, cfg;
5567 u32 multitrc = F_TRCMULTIFILTER;
5568 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5570 if (idx < 0 || idx >= NTRACE)
5573 if (tp == NULL || !enable) {
5574 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5580 * TODO - After T4 data book is updated, specify the exact
5583 * See T4 data book - MPS section for a complete description
5584 * of the below if..else handling of A_MPS_TRC_CFG register
5587 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5588 if (cfg & F_TRCMULTIFILTER) {
5590 * If multiple tracers are enabled, then maximum
5591 * capture size is 2.5KB (FIFO size of a single channel)
5592 * minus 2 flits for CPL_TRACE_PKT header.
5594 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5598 * If multiple tracers are disabled, to avoid deadlocks
5599 * maximum packet capture size of 9600 bytes is recommended.
5600 * Also in this mode, only trace0 can be enabled and running.
5603 if (tp->snap_len > 9600 || idx)
5607 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5608 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5609 tp->min_len > M_TFMINPKTSIZE)
5612 /* stop the tracer we'll be changing */
5613 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5615 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5616 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5617 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5619 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5620 t4_write_reg(adap, data_reg, tp->data[i]);
5621 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5623 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5624 V_TFCAPTUREMAX(tp->snap_len) |
5625 V_TFMINPKTSIZE(tp->min_len));
5626 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5627 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5629 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5630 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5636 * t4_get_trace_filter - query one of the tracing filters
5637 * @adap: the adapter
5638 * @tp: the current trace filter parameters
5639 * @idx: which trace filter to query
5640 * @enabled: non-zero if the filter is enabled
5642 * Returns the current settings of one of the HW tracing filters.
5644 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5648 int i, ofst = idx * 4;
5649 u32 data_reg, mask_reg;
5651 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5652 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5655 *enabled = !!(ctla & F_TFEN);
5656 tp->port = G_TFPORT(ctla);
5657 tp->invert = !!(ctla & F_TFINVERTMATCH);
5659 *enabled = !!(ctla & F_T5_TFEN);
5660 tp->port = G_T5_TFPORT(ctla);
5661 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5663 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5664 tp->min_len = G_TFMINPKTSIZE(ctlb);
5665 tp->skip_ofst = G_TFOFFSET(ctla);
5666 tp->skip_len = G_TFLENGTH(ctla);
5668 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5669 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5670 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5672 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5673 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5674 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5679 * t4_pmtx_get_stats - returns the HW stats from PMTX
5680 * @adap: the adapter
5681 * @cnt: where to store the count statistics
5682 * @cycles: where to store the cycle statistics
5684 * Returns performance statistics from PMTX.
5686 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5691 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5692 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5693 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5695 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5697 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5698 A_PM_TX_DBG_DATA, data, 2,
5699 A_PM_TX_DBG_STAT_MSB);
5700 cycles[i] = (((u64)data[0] << 32) | data[1]);
5706 * t4_pmrx_get_stats - returns the HW stats from PMRX
5707 * @adap: the adapter
5708 * @cnt: where to store the count statistics
5709 * @cycles: where to store the cycle statistics
5711 * Returns performance statistics from PMRX.
5713 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5718 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5719 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5720 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5722 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5724 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5725 A_PM_RX_DBG_DATA, data, 2,
5726 A_PM_RX_DBG_STAT_MSB);
5727 cycles[i] = (((u64)data[0] << 32) | data[1]);
5733 * t4_get_mps_bg_map - return the buffer groups associated with a port
5734 * @adap: the adapter
5735 * @idx: the port index
5737 * Returns a bitmap indicating which MPS buffer groups are associated
5738 * with the given port. Bit i is set if buffer group i is used by the
5741 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5743 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5746 return idx == 0 ? 0xf : 0;
5747 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5748 return idx < 2 ? (3 << (2 * idx)) : 0;
5753 * t4_get_port_type_description - return Port Type string description
5754 * @port_type: firmware Port Type enumeration
5756 const char *t4_get_port_type_description(enum fw_port_type port_type)
5758 static const char *const port_type_description[] = {
5777 if (port_type < ARRAY_SIZE(port_type_description))
5778 return port_type_description[port_type];
5783 * t4_get_port_stats_offset - collect port stats relative to a previous
5785 * @adap: The adapter
5787 * @stats: Current stats to fill
5788 * @offset: Previous stats snapshot
5790 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5791 struct port_stats *stats,
5792 struct port_stats *offset)
5797 t4_get_port_stats(adap, idx, stats);
5798 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5799 i < (sizeof(struct port_stats)/sizeof(u64)) ;
5805 * t4_get_port_stats - collect port statistics
5806 * @adap: the adapter
5807 * @idx: the port index
5808 * @p: the stats structure to fill
5810 * Collect statistics related to the given port from HW.
5812 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5814 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5817 #define GET_STAT(name) \
5818 t4_read_reg64(adap, \
5819 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5820 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5821 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5823 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5825 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5826 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5827 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5828 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5829 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5830 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5831 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5832 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5833 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5834 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5835 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5836 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5837 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5838 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5839 p->tx_drop = GET_STAT(TX_PORT_DROP);
5840 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5841 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5842 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5843 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5844 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5845 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5846 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5847 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
5849 if (stat_ctl & F_COUNTPAUSESTATTX) {
5850 p->tx_frames -= p->tx_pause;
5851 p->tx_octets -= p->tx_pause * 64;
5852 p->tx_mcast_frames -= p->tx_pause;
5855 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5856 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5857 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5858 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5859 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5860 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5861 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5862 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5863 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5864 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5865 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5866 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5867 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5868 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5869 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5870 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5871 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5872 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5873 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5874 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5875 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5876 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5877 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5878 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5879 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5880 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5881 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
5883 if (stat_ctl & F_COUNTPAUSESTATRX) {
5884 p->rx_frames -= p->rx_pause;
5885 p->rx_octets -= p->rx_pause * 64;
5886 p->rx_mcast_frames -= p->rx_pause;
5889 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5890 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5891 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5892 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5893 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5894 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5895 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5896 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5903 * t4_get_lb_stats - collect loopback port statistics
5904 * @adap: the adapter
5905 * @idx: the loopback port index
5906 * @p: the stats structure to fill
5908 * Return HW statistics for the given loopback port.
5910 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5912 u32 bgmap = t4_get_mps_bg_map(adap, idx);
5914 #define GET_STAT(name) \
5915 t4_read_reg64(adap, \
5917 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5918 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5919 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5921 p->octets = GET_STAT(BYTES);
5922 p->frames = GET_STAT(FRAMES);
5923 p->bcast_frames = GET_STAT(BCAST);
5924 p->mcast_frames = GET_STAT(MCAST);
5925 p->ucast_frames = GET_STAT(UCAST);
5926 p->error_frames = GET_STAT(ERROR);
5928 p->frames_64 = GET_STAT(64B);
5929 p->frames_65_127 = GET_STAT(65B_127B);
5930 p->frames_128_255 = GET_STAT(128B_255B);
5931 p->frames_256_511 = GET_STAT(256B_511B);
5932 p->frames_512_1023 = GET_STAT(512B_1023B);
5933 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5934 p->frames_1519_max = GET_STAT(1519B_MAX);
5935 p->drop = GET_STAT(DROP_FRAMES);
5937 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5938 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5939 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5940 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5941 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5942 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5943 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5944 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5951 * t4_wol_magic_enable - enable/disable magic packet WoL
5952 * @adap: the adapter
5953 * @port: the physical port index
5954 * @addr: MAC address expected in magic packets, %NULL to disable
5956 * Enables/disables magic packet wake-on-LAN for the selected port.
5958 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5961 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
5964 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5965 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5966 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5968 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5969 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5970 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5974 t4_write_reg(adap, mag_id_reg_l,
5975 (addr[2] << 24) | (addr[3] << 16) |
5976 (addr[4] << 8) | addr[5]);
5977 t4_write_reg(adap, mag_id_reg_h,
5978 (addr[0] << 8) | addr[1]);
5980 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5981 V_MAGICEN(addr != NULL));
5985 * t4_wol_pat_enable - enable/disable pattern-based WoL
5986 * @adap: the adapter
5987 * @port: the physical port index
5988 * @map: bitmap of which HW pattern filters to set
5989 * @mask0: byte mask for bytes 0-63 of a packet
5990 * @mask1: byte mask for bytes 64-127 of a packet
5991 * @crc: Ethernet CRC for selected bytes
5992 * @enable: enable/disable switch
5994 * Sets the pattern filters indicated in @map to mask out the bytes
5995 * specified in @mask0/@mask1 in received packets and compare the CRC of
5996 * the resulting packet against @crc. If @enable is %true pattern-based
5997 * WoL is enabled, otherwise disabled.
5999 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6000 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6006 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6008 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6011 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6017 #define EPIO_REG(name) \
6018 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6019 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6021 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6022 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6023 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6025 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6029 /* write byte masks */
6030 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6031 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6032 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6033 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6037 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6038 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6039 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6040 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6045 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6049 /* t4_mk_filtdelwr - create a delete filter WR
6050 * @ftid: the filter ID
6051 * @wr: the filter work request to populate
6052 * @qid: ingress queue to receive the delete notification
6054 * Creates a filter work request to delete the supplied filter. If @qid is
6055 * negative the delete notification is suppressed.
6057 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6059 memset(wr, 0, sizeof(*wr));
6060 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6061 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6062 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6063 V_FW_FILTER_WR_NOREPLY(qid < 0));
6064 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6066 wr->rx_chan_rx_rpl_iq =
6067 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6070 #define INIT_CMD(var, cmd, rd_wr) do { \
6071 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
6072 F_FW_CMD_REQUEST | \
6073 F_FW_CMD_##rd_wr); \
6074 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6077 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6081 struct fw_ldst_cmd c;
6083 memset(&c, 0, sizeof(c));
6084 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6085 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6089 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6090 c.u.addrval.addr = cpu_to_be32(addr);
6091 c.u.addrval.val = cpu_to_be32(val);
6093 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6097 * t4_mdio_rd - read a PHY register through MDIO
6098 * @adap: the adapter
6099 * @mbox: mailbox to use for the FW command
6100 * @phy_addr: the PHY address
6101 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6102 * @reg: the register to read
6103 * @valp: where to store the value
6105 * Issues a FW command through the given mailbox to read a PHY register.
6107 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6108 unsigned int mmd, unsigned int reg, unsigned int *valp)
6112 struct fw_ldst_cmd c;
6114 memset(&c, 0, sizeof(c));
6115 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6116 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6117 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6119 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6120 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6121 V_FW_LDST_CMD_MMD(mmd));
6122 c.u.mdio.raddr = cpu_to_be16(reg);
6124 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6126 *valp = be16_to_cpu(c.u.mdio.rval);
6131 * t4_mdio_wr - write a PHY register through MDIO
6132 * @adap: the adapter
6133 * @mbox: mailbox to use for the FW command
6134 * @phy_addr: the PHY address
6135 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6136 * @reg: the register to write
6137 * @valp: value to write
6139 * Issues a FW command through the given mailbox to write a PHY register.
6141 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6142 unsigned int mmd, unsigned int reg, unsigned int val)
6145 struct fw_ldst_cmd c;
6147 memset(&c, 0, sizeof(c));
6148 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6149 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6150 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6152 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6153 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6154 V_FW_LDST_CMD_MMD(mmd));
6155 c.u.mdio.raddr = cpu_to_be16(reg);
6156 c.u.mdio.rval = cpu_to_be16(val);
6158 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6163 * t4_sge_decode_idma_state - decode the idma state
6164 * @adap: the adapter
6165 * @state: the state idma is stuck in
6167 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6169 static const char * const t4_decode[] = {
6171 "IDMA_PUSH_MORE_CPL_FIFO",
6172 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6174 "IDMA_PHYSADDR_SEND_PCIEHDR",
6175 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6176 "IDMA_PHYSADDR_SEND_PAYLOAD",
6177 "IDMA_SEND_FIFO_TO_IMSG",
6178 "IDMA_FL_REQ_DATA_FL_PREP",
6179 "IDMA_FL_REQ_DATA_FL",
6181 "IDMA_FL_H_REQ_HEADER_FL",
6182 "IDMA_FL_H_SEND_PCIEHDR",
6183 "IDMA_FL_H_PUSH_CPL_FIFO",
6184 "IDMA_FL_H_SEND_CPL",
6185 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6186 "IDMA_FL_H_SEND_IP_HDR",
6187 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6188 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6189 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6190 "IDMA_FL_D_SEND_PCIEHDR",
6191 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6192 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6193 "IDMA_FL_SEND_PCIEHDR",
6194 "IDMA_FL_PUSH_CPL_FIFO",
6196 "IDMA_FL_SEND_PAYLOAD_FIRST",
6197 "IDMA_FL_SEND_PAYLOAD",
6198 "IDMA_FL_REQ_NEXT_DATA_FL",
6199 "IDMA_FL_SEND_NEXT_PCIEHDR",
6200 "IDMA_FL_SEND_PADDING",
6201 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6202 "IDMA_FL_SEND_FIFO_TO_IMSG",
6203 "IDMA_FL_REQ_DATAFL_DONE",
6204 "IDMA_FL_REQ_HEADERFL_DONE",
6206 static const char * const t5_decode[] = {
6209 "IDMA_PUSH_MORE_CPL_FIFO",
6210 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6211 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6212 "IDMA_PHYSADDR_SEND_PCIEHDR",
6213 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6214 "IDMA_PHYSADDR_SEND_PAYLOAD",
6215 "IDMA_SEND_FIFO_TO_IMSG",
6216 "IDMA_FL_REQ_DATA_FL",
6218 "IDMA_FL_DROP_SEND_INC",
6219 "IDMA_FL_H_REQ_HEADER_FL",
6220 "IDMA_FL_H_SEND_PCIEHDR",
6221 "IDMA_FL_H_PUSH_CPL_FIFO",
6222 "IDMA_FL_H_SEND_CPL",
6223 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6224 "IDMA_FL_H_SEND_IP_HDR",
6225 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6226 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6227 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6228 "IDMA_FL_D_SEND_PCIEHDR",
6229 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6230 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6231 "IDMA_FL_SEND_PCIEHDR",
6232 "IDMA_FL_PUSH_CPL_FIFO",
6234 "IDMA_FL_SEND_PAYLOAD_FIRST",
6235 "IDMA_FL_SEND_PAYLOAD",
6236 "IDMA_FL_REQ_NEXT_DATA_FL",
6237 "IDMA_FL_SEND_NEXT_PCIEHDR",
6238 "IDMA_FL_SEND_PADDING",
6239 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6241 static const char * const t6_decode[] = {
6243 "IDMA_PUSH_MORE_CPL_FIFO",
6244 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6245 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6246 "IDMA_PHYSADDR_SEND_PCIEHDR",
6247 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6248 "IDMA_PHYSADDR_SEND_PAYLOAD",
6249 "IDMA_FL_REQ_DATA_FL",
6251 "IDMA_FL_DROP_SEND_INC",
6252 "IDMA_FL_H_REQ_HEADER_FL",
6253 "IDMA_FL_H_SEND_PCIEHDR",
6254 "IDMA_FL_H_PUSH_CPL_FIFO",
6255 "IDMA_FL_H_SEND_CPL",
6256 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6257 "IDMA_FL_H_SEND_IP_HDR",
6258 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6259 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6260 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6261 "IDMA_FL_D_SEND_PCIEHDR",
6262 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6263 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6264 "IDMA_FL_SEND_PCIEHDR",
6265 "IDMA_FL_PUSH_CPL_FIFO",
6267 "IDMA_FL_SEND_PAYLOAD_FIRST",
6268 "IDMA_FL_SEND_PAYLOAD",
6269 "IDMA_FL_REQ_NEXT_DATA_FL",
6270 "IDMA_FL_SEND_NEXT_PCIEHDR",
6271 "IDMA_FL_SEND_PADDING",
6272 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6274 static const u32 sge_regs[] = {
6275 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6276 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6277 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6279 const char * const *sge_idma_decode;
6280 int sge_idma_decode_nstates;
6282 unsigned int chip_version = chip_id(adapter);
6284 /* Select the right set of decode strings to dump depending on the
6285 * adapter chip type.
6287 switch (chip_version) {
6289 sge_idma_decode = (const char * const *)t4_decode;
6290 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6294 sge_idma_decode = (const char * const *)t5_decode;
6295 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6299 sge_idma_decode = (const char * const *)t6_decode;
6300 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6304 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6308 if (state < sge_idma_decode_nstates)
6309 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6311 CH_WARN(adapter, "idma state %d unknown\n", state);
6313 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6314 CH_WARN(adapter, "SGE register %#x value %#x\n",
6315 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6319 * t4_sge_ctxt_flush - flush the SGE context cache
6320 * @adap: the adapter
6321 * @mbox: mailbox to use for the FW command
6323 * Issues a FW command through the given mailbox to flush the
6324 * SGE context cache.
6326 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6330 struct fw_ldst_cmd c;
6332 memset(&c, 0, sizeof(c));
6333 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6334 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6335 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6337 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6338 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6340 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6345 * t4_fw_hello - establish communication with FW
6346 * @adap: the adapter
6347 * @mbox: mailbox to use for the FW command
6348 * @evt_mbox: mailbox to receive async FW events
6349 * @master: specifies the caller's willingness to be the device master
6350 * @state: returns the current device state (if non-NULL)
6352 * Issues a command to establish communication with FW. Returns either
6353 * an error (negative integer) or the mailbox of the Master PF.
6355 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6356 enum dev_master master, enum dev_state *state)
6359 struct fw_hello_cmd c;
6361 unsigned int master_mbox;
6362 int retries = FW_CMD_HELLO_RETRIES;
6365 memset(&c, 0, sizeof(c));
6366 INIT_CMD(c, HELLO, WRITE);
6367 c.err_to_clearinit = cpu_to_be32(
6368 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6369 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6370 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6371 mbox : M_FW_HELLO_CMD_MBMASTER) |
6372 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6373 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6374 F_FW_HELLO_CMD_CLEARINIT);
6377 * Issue the HELLO command to the firmware. If it's not successful
6378 * but indicates that we got a "busy" or "timeout" condition, retry
6379 * the HELLO until we exhaust our retry limit. If we do exceed our
6380 * retry limit, check to see if the firmware left us any error
6381 * information and report that if so ...
6383 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6384 if (ret != FW_SUCCESS) {
6385 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6387 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6388 t4_report_fw_error(adap);
6392 v = be32_to_cpu(c.err_to_clearinit);
6393 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6395 if (v & F_FW_HELLO_CMD_ERR)
6396 *state = DEV_STATE_ERR;
6397 else if (v & F_FW_HELLO_CMD_INIT)
6398 *state = DEV_STATE_INIT;
6400 *state = DEV_STATE_UNINIT;
6404 * If we're not the Master PF then we need to wait around for the
6405 * Master PF Driver to finish setting up the adapter.
6407 * Note that we also do this wait if we're a non-Master-capable PF and
6408 * there is no current Master PF; a Master PF may show up momentarily
6409 * and we wouldn't want to fail pointlessly. (This can happen when an
6410 * OS loads lots of different drivers rapidly at the same time). In
6411 * this case, the Master PF returned by the firmware will be
6412 * M_PCIE_FW_MASTER so the test below will work ...
6414 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6415 master_mbox != mbox) {
6416 int waiting = FW_CMD_HELLO_TIMEOUT;
6419 * Wait for the firmware to either indicate an error or
6420 * initialized state. If we see either of these we bail out
6421 * and report the issue to the caller. If we exhaust the
6422 * "hello timeout" and we haven't exhausted our retries, try
6423 * again. Otherwise bail with a timeout error.
6432 * If neither Error nor Initialialized are indicated
6433 * by the firmware keep waiting till we exhaust our
6434 * timeout ... and then retry if we haven't exhausted
6437 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6438 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6449 * We either have an Error or Initialized condition
6450 * report errors preferentially.
6453 if (pcie_fw & F_PCIE_FW_ERR)
6454 *state = DEV_STATE_ERR;
6455 else if (pcie_fw & F_PCIE_FW_INIT)
6456 *state = DEV_STATE_INIT;
6460 * If we arrived before a Master PF was selected and
6461 * there's not a valid Master PF, grab its identity
6464 if (master_mbox == M_PCIE_FW_MASTER &&
6465 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6466 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6475 * t4_fw_bye - end communication with FW
6476 * @adap: the adapter
6477 * @mbox: mailbox to use for the FW command
6479 * Issues a command to terminate communication with FW.
6481 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6483 struct fw_bye_cmd c;
6485 memset(&c, 0, sizeof(c));
6486 INIT_CMD(c, BYE, WRITE);
6487 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6491 * t4_fw_reset - issue a reset to FW
6492 * @adap: the adapter
6493 * @mbox: mailbox to use for the FW command
6494 * @reset: specifies the type of reset to perform
6496 * Issues a reset command of the specified type to FW.
6498 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6500 struct fw_reset_cmd c;
6502 memset(&c, 0, sizeof(c));
6503 INIT_CMD(c, RESET, WRITE);
6504 c.val = cpu_to_be32(reset);
6505 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6509 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6510 * @adap: the adapter
6511 * @mbox: mailbox to use for the FW RESET command (if desired)
6512 * @force: force uP into RESET even if FW RESET command fails
6514 * Issues a RESET command to firmware (if desired) with a HALT indication
6515 * and then puts the microprocessor into RESET state. The RESET command
6516 * will only be issued if a legitimate mailbox is provided (mbox <=
6517 * M_PCIE_FW_MASTER).
6519 * This is generally used in order for the host to safely manipulate the
6520 * adapter without fear of conflicting with whatever the firmware might
6521 * be doing. The only way out of this state is to RESTART the firmware
6524 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6529 * If a legitimate mailbox is provided, issue a RESET command
6530 * with a HALT indication.
6532 if (mbox <= M_PCIE_FW_MASTER) {
6533 struct fw_reset_cmd c;
6535 memset(&c, 0, sizeof(c));
6536 INIT_CMD(c, RESET, WRITE);
6537 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6538 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6539 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6543 * Normally we won't complete the operation if the firmware RESET
6544 * command fails but if our caller insists we'll go ahead and put the
6545 * uP into RESET. This can be useful if the firmware is hung or even
6546 * missing ... We'll have to take the risk of putting the uP into
6547 * RESET without the cooperation of firmware in that case.
6549 * We also force the firmware's HALT flag to be on in case we bypassed
6550 * the firmware RESET command above or we're dealing with old firmware
6551 * which doesn't have the HALT capability. This will serve as a flag
6552 * for the incoming firmware to know that it's coming out of a HALT
6553 * rather than a RESET ... if it's new enough to understand that ...
6555 if (ret == 0 || force) {
6556 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6557 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6562 * And we always return the result of the firmware RESET command
6563 * even when we force the uP into RESET ...
6569 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6570 * @adap: the adapter
6571 * @reset: if we want to do a RESET to restart things
6573 * Restart firmware previously halted by t4_fw_halt(). On successful
6574 * return the previous PF Master remains as the new PF Master and there
6575 * is no need to issue a new HELLO command, etc.
6577 * We do this in two ways:
6579 * 1. If we're dealing with newer firmware we'll simply want to take
6580 * the chip's microprocessor out of RESET. This will cause the
6581 * firmware to start up from its start vector. And then we'll loop
6582 * until the firmware indicates it's started again (PCIE_FW.HALT
6583 * reset to 0) or we timeout.
6585 * 2. If we're dealing with older firmware then we'll need to RESET
6586 * the chip since older firmware won't recognize the PCIE_FW.HALT
6587 * flag and automatically RESET itself on startup.
6589 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6593 * Since we're directing the RESET instead of the firmware
6594 * doing it automatically, we need to clear the PCIE_FW.HALT
6597 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6600 * If we've been given a valid mailbox, first try to get the
6601 * firmware to do the RESET. If that works, great and we can
6602 * return success. Otherwise, if we haven't been given a
6603 * valid mailbox or the RESET command failed, fall back to
6604 * hitting the chip with a hammer.
6606 if (mbox <= M_PCIE_FW_MASTER) {
6607 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6609 if (t4_fw_reset(adap, mbox,
6610 F_PIORST | F_PIORSTMODE) == 0)
6614 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6619 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6620 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6621 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6632 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6633 * @adap: the adapter
6634 * @mbox: mailbox to use for the FW RESET command (if desired)
6635 * @fw_data: the firmware image to write
6637 * @force: force upgrade even if firmware doesn't cooperate
6639 * Perform all of the steps necessary for upgrading an adapter's
6640 * firmware image. Normally this requires the cooperation of the
6641 * existing firmware in order to halt all existing activities
6642 * but if an invalid mailbox token is passed in we skip that step
6643 * (though we'll still put the adapter microprocessor into RESET in
6646 * On successful return the new firmware will have been loaded and
6647 * the adapter will have been fully RESET losing all previous setup
6648 * state. On unsuccessful return the adapter may be completely hosed ...
6649 * positive errno indicates that the adapter is ~probably~ intact, a
6650 * negative errno indicates that things are looking bad ...
6652 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6653 const u8 *fw_data, unsigned int size, int force)
6655 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6656 unsigned int bootstrap =
6657 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6660 if (!t4_fw_matches_chip(adap, fw_hdr))
6664 ret = t4_fw_halt(adap, mbox, force);
6665 if (ret < 0 && !force)
6669 ret = t4_load_fw(adap, fw_data, size);
6670 if (ret < 0 || bootstrap)
6674 * Older versions of the firmware don't understand the new
6675 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6676 * restart. So for newly loaded older firmware we'll have to do the
6677 * RESET for it so it starts up on a clean slate. We can tell if
6678 * the newly loaded firmware will handle this right by checking
6679 * its header flags to see if it advertises the capability.
6681 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6682 return t4_fw_restart(adap, mbox, reset);
6686 * t4_fw_initialize - ask FW to initialize the device
6687 * @adap: the adapter
6688 * @mbox: mailbox to use for the FW command
6690 * Issues a command to FW to partially initialize the device. This
6691 * performs initialization that generally doesn't depend on user input.
6693 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6695 struct fw_initialize_cmd c;
6697 memset(&c, 0, sizeof(c));
6698 INIT_CMD(c, INITIALIZE, WRITE);
6699 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6703 * t4_query_params_rw - query FW or device parameters
6704 * @adap: the adapter
6705 * @mbox: mailbox to use for the FW command
6708 * @nparams: the number of parameters
6709 * @params: the parameter names
6710 * @val: the parameter values
6711 * @rw: Write and read flag
6713 * Reads the value of FW or device parameters. Up to 7 parameters can be
6716 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6717 unsigned int vf, unsigned int nparams, const u32 *params,
6721 struct fw_params_cmd c;
6722 __be32 *p = &c.param[0].mnem;
6727 memset(&c, 0, sizeof(c));
6728 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6729 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6730 V_FW_PARAMS_CMD_PFN(pf) |
6731 V_FW_PARAMS_CMD_VFN(vf));
6732 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6734 for (i = 0; i < nparams; i++) {
6735 *p++ = cpu_to_be32(*params++);
6737 *p = cpu_to_be32(*(val + i));
6741 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6743 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6744 *val++ = be32_to_cpu(*p);
6748 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6749 unsigned int vf, unsigned int nparams, const u32 *params,
6752 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6756 * t4_set_params_timeout - sets FW or device parameters
6757 * @adap: the adapter
6758 * @mbox: mailbox to use for the FW command
6761 * @nparams: the number of parameters
6762 * @params: the parameter names
6763 * @val: the parameter values
6764 * @timeout: the timeout time
6766 * Sets the value of FW or device parameters. Up to 7 parameters can be
6767 * specified at once.
6769 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6770 unsigned int pf, unsigned int vf,
6771 unsigned int nparams, const u32 *params,
6772 const u32 *val, int timeout)
6774 struct fw_params_cmd c;
6775 __be32 *p = &c.param[0].mnem;
6780 memset(&c, 0, sizeof(c));
6781 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6782 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6783 V_FW_PARAMS_CMD_PFN(pf) |
6784 V_FW_PARAMS_CMD_VFN(vf));
6785 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6788 *p++ = cpu_to_be32(*params++);
6789 *p++ = cpu_to_be32(*val++);
6792 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6796 * t4_set_params - sets FW or device parameters
6797 * @adap: the adapter
6798 * @mbox: mailbox to use for the FW command
6801 * @nparams: the number of parameters
6802 * @params: the parameter names
6803 * @val: the parameter values
6805 * Sets the value of FW or device parameters. Up to 7 parameters can be
6806 * specified at once.
6808 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6809 unsigned int vf, unsigned int nparams, const u32 *params,
6812 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6813 FW_CMD_MAX_TIMEOUT);
6817 * t4_cfg_pfvf - configure PF/VF resource limits
6818 * @adap: the adapter
6819 * @mbox: mailbox to use for the FW command
6820 * @pf: the PF being configured
6821 * @vf: the VF being configured
6822 * @txq: the max number of egress queues
6823 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6824 * @rxqi: the max number of interrupt-capable ingress queues
6825 * @rxq: the max number of interruptless ingress queues
6826 * @tc: the PCI traffic class
6827 * @vi: the max number of virtual interfaces
6828 * @cmask: the channel access rights mask for the PF/VF
6829 * @pmask: the port access rights mask for the PF/VF
6830 * @nexact: the maximum number of exact MPS filters
6831 * @rcaps: read capabilities
6832 * @wxcaps: write/execute capabilities
6834 * Configures resource limits and capabilities for a physical or virtual
6837 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6838 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6839 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6840 unsigned int vi, unsigned int cmask, unsigned int pmask,
6841 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6843 struct fw_pfvf_cmd c;
6845 memset(&c, 0, sizeof(c));
6846 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6847 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6848 V_FW_PFVF_CMD_VFN(vf));
6849 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6850 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6851 V_FW_PFVF_CMD_NIQ(rxq));
6852 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6853 V_FW_PFVF_CMD_PMASK(pmask) |
6854 V_FW_PFVF_CMD_NEQ(txq));
6855 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6856 V_FW_PFVF_CMD_NVI(vi) |
6857 V_FW_PFVF_CMD_NEXACTF(nexact));
6858 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6859 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6860 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6861 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6865 * t4_alloc_vi_func - allocate a virtual interface
6866 * @adap: the adapter
6867 * @mbox: mailbox to use for the FW command
6868 * @port: physical port associated with the VI
6869 * @pf: the PF owning the VI
6870 * @vf: the VF owning the VI
6871 * @nmac: number of MAC addresses needed (1 to 5)
6872 * @mac: the MAC addresses of the VI
6873 * @rss_size: size of RSS table slice associated with this VI
6874 * @portfunc: which Port Application Function MAC Address is desired
6875 * @idstype: Intrusion Detection Type
6877 * Allocates a virtual interface for the given physical port. If @mac is
6878 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
6879 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6880 * @mac should be large enough to hold @nmac Ethernet addresses, they are
6881 * stored consecutively so the space needed is @nmac * 6 bytes.
6882 * Returns a negative error number or the non-negative VI id.
6884 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6885 unsigned int port, unsigned int pf, unsigned int vf,
6886 unsigned int nmac, u8 *mac, u16 *rss_size,
6887 unsigned int portfunc, unsigned int idstype)
6892 memset(&c, 0, sizeof(c));
6893 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6894 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6895 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6896 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6897 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6898 V_FW_VI_CMD_FUNC(portfunc));
6899 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6902 c.norss_rsssize = F_FW_VI_CMD_NORSS;
6904 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6909 memcpy(mac, c.mac, sizeof(c.mac));
6912 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6914 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6916 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6918 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6922 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6923 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6927 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6928 * @adap: the adapter
6929 * @mbox: mailbox to use for the FW command
6930 * @port: physical port associated with the VI
6931 * @pf: the PF owning the VI
6932 * @vf: the VF owning the VI
6933 * @nmac: number of MAC addresses needed (1 to 5)
6934 * @mac: the MAC addresses of the VI
6935 * @rss_size: size of RSS table slice associated with this VI
6937 * backwards compatible and convieniance routine to allocate a Virtual
6938 * Interface with a Ethernet Port Application Function and Intrustion
6939 * Detection System disabled.
6941 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6942 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6945 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6950 * t4_free_vi - free a virtual interface
6951 * @adap: the adapter
6952 * @mbox: mailbox to use for the FW command
6953 * @pf: the PF owning the VI
6954 * @vf: the VF owning the VI
6955 * @viid: virtual interface identifiler
6957 * Free a previously allocated virtual interface.
6959 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6960 unsigned int vf, unsigned int viid)
6964 memset(&c, 0, sizeof(c));
6965 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6968 V_FW_VI_CMD_PFN(pf) |
6969 V_FW_VI_CMD_VFN(vf));
6970 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6971 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6973 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6977 * t4_set_rxmode - set Rx properties of a virtual interface
6978 * @adap: the adapter
6979 * @mbox: mailbox to use for the FW command
6981 * @mtu: the new MTU or -1
6982 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6983 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6984 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
6985 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
6986 * @sleep_ok: if true we may sleep while awaiting command completion
6988 * Sets Rx properties of a virtual interface.
6990 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
6991 int mtu, int promisc, int all_multi, int bcast, int vlanex,
6994 struct fw_vi_rxmode_cmd c;
6996 /* convert to FW values */
6998 mtu = M_FW_VI_RXMODE_CMD_MTU;
7000 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7002 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7004 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7006 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7008 memset(&c, 0, sizeof(c));
7009 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7010 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7011 V_FW_VI_RXMODE_CMD_VIID(viid));
7012 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7014 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7015 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7016 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7017 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7018 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7019 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7023 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7024 * @adap: the adapter
7025 * @mbox: mailbox to use for the FW command
7027 * @free: if true any existing filters for this VI id are first removed
7028 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7029 * @addr: the MAC address(es)
7030 * @idx: where to store the index of each allocated filter
7031 * @hash: pointer to hash address filter bitmap
7032 * @sleep_ok: call is allowed to sleep
7034 * Allocates an exact-match filter for each of the supplied addresses and
7035 * sets it to the corresponding address. If @idx is not %NULL it should
7036 * have at least @naddr entries, each of which will be set to the index of
7037 * the filter allocated for the corresponding MAC address. If a filter
7038 * could not be allocated for an address its index is set to 0xffff.
7039 * If @hash is not %NULL addresses that fail to allocate an exact filter
7040 * are hashed and update the hash filter bitmap pointed at by @hash.
7042 * Returns a negative error number or the number of filters allocated.
7044 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7045 unsigned int viid, bool free, unsigned int naddr,
7046 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7048 int offset, ret = 0;
7049 struct fw_vi_mac_cmd c;
7050 unsigned int nfilters = 0;
7051 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7052 unsigned int rem = naddr;
7054 if (naddr > max_naddr)
7057 for (offset = 0; offset < naddr ; /**/) {
7058 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7060 : ARRAY_SIZE(c.u.exact));
7061 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7062 u.exact[fw_naddr]), 16);
7063 struct fw_vi_mac_exact *p;
7066 memset(&c, 0, sizeof(c));
7067 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7070 V_FW_CMD_EXEC(free) |
7071 V_FW_VI_MAC_CMD_VIID(viid));
7072 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7073 V_FW_CMD_LEN16(len16));
7075 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7077 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7078 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7079 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7083 * It's okay if we run out of space in our MAC address arena.
7084 * Some of the addresses we submit may get stored so we need
7085 * to run through the reply to see what the results were ...
7087 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7088 if (ret && ret != -FW_ENOMEM)
7091 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7092 u16 index = G_FW_VI_MAC_CMD_IDX(
7093 be16_to_cpu(p->valid_to_idx));
7096 idx[offset+i] = (index >= max_naddr
7099 if (index < max_naddr)
7102 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
7110 if (ret == 0 || ret == -FW_ENOMEM)
7116 * t4_change_mac - modifies the exact-match filter for a MAC address
7117 * @adap: the adapter
7118 * @mbox: mailbox to use for the FW command
7120 * @idx: index of existing filter for old value of MAC address, or -1
7121 * @addr: the new MAC address value
7122 * @persist: whether a new MAC allocation should be persistent
7123 * @add_smt: if true also add the address to the HW SMT
7125 * Modifies an exact-match filter and sets it to the new MAC address if
7126 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7127 * latter case the address is added persistently if @persist is %true.
7129 * Note that in general it is not possible to modify the value of a given
7130 * filter so the generic way to modify an address filter is to free the one
7131 * being used by the old address value and allocate a new filter for the
7132 * new address value.
7134 * Returns a negative error number or the index of the filter with the new
7135 * MAC value. Note that this index may differ from @idx.
7137 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7138 int idx, const u8 *addr, bool persist, bool add_smt)
7141 struct fw_vi_mac_cmd c;
7142 struct fw_vi_mac_exact *p = c.u.exact;
7143 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7145 if (idx < 0) /* new allocation */
7146 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7147 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7149 memset(&c, 0, sizeof(c));
7150 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7151 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7152 V_FW_VI_MAC_CMD_VIID(viid));
7153 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7154 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7155 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7156 V_FW_VI_MAC_CMD_IDX(idx));
7157 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7159 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7161 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7162 if (ret >= max_mac_addr)
7169 * t4_set_addr_hash - program the MAC inexact-match hash filter
7170 * @adap: the adapter
7171 * @mbox: mailbox to use for the FW command
7173 * @ucast: whether the hash filter should also match unicast addresses
7174 * @vec: the value to be written to the hash filter
7175 * @sleep_ok: call is allowed to sleep
7177 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7179 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7180 bool ucast, u64 vec, bool sleep_ok)
7182 struct fw_vi_mac_cmd c;
7185 memset(&c, 0, sizeof(c));
7186 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7187 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7188 V_FW_VI_ENABLE_CMD_VIID(viid));
7189 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7190 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7191 c.freemacs_to_len16 = cpu_to_be32(val);
7192 c.u.hash.hashvec = cpu_to_be64(vec);
7193 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7197 * t4_enable_vi_params - enable/disable a virtual interface
7198 * @adap: the adapter
7199 * @mbox: mailbox to use for the FW command
7201 * @rx_en: 1=enable Rx, 0=disable Rx
7202 * @tx_en: 1=enable Tx, 0=disable Tx
7203 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7205 * Enables/disables a virtual interface. Note that setting DCB Enable
7206 * only makes sense when enabling a Virtual Interface ...
7208 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7209 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7211 struct fw_vi_enable_cmd c;
7213 memset(&c, 0, sizeof(c));
7214 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7215 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7216 V_FW_VI_ENABLE_CMD_VIID(viid));
7217 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7218 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7219 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7221 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7225 * t4_enable_vi - enable/disable a virtual interface
7226 * @adap: the adapter
7227 * @mbox: mailbox to use for the FW command
7229 * @rx_en: 1=enable Rx, 0=disable Rx
7230 * @tx_en: 1=enable Tx, 0=disable Tx
7232 * Enables/disables a virtual interface. Note that setting DCB Enable
7233 * only makes sense when enabling a Virtual Interface ...
7235 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7236 bool rx_en, bool tx_en)
7238 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7242 * t4_identify_port - identify a VI's port by blinking its LED
7243 * @adap: the adapter
7244 * @mbox: mailbox to use for the FW command
7246 * @nblinks: how many times to blink LED at 2.5 Hz
7248 * Identifies a VI's port by blinking its LED.
7250 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7251 unsigned int nblinks)
7253 struct fw_vi_enable_cmd c;
7255 memset(&c, 0, sizeof(c));
7256 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7257 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7258 V_FW_VI_ENABLE_CMD_VIID(viid));
7259 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7260 c.blinkdur = cpu_to_be16(nblinks);
7261 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7265 * t4_iq_stop - stop an ingress queue and its FLs
7266 * @adap: the adapter
7267 * @mbox: mailbox to use for the FW command
7268 * @pf: the PF owning the queues
7269 * @vf: the VF owning the queues
7270 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7271 * @iqid: ingress queue id
7272 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7273 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7275 * Stops an ingress queue and its associated FLs, if any. This causes
7276 * any current or future data/messages destined for these queues to be
7279 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7280 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7281 unsigned int fl0id, unsigned int fl1id)
7285 memset(&c, 0, sizeof(c));
7286 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7287 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7288 V_FW_IQ_CMD_VFN(vf));
7289 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7290 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7291 c.iqid = cpu_to_be16(iqid);
7292 c.fl0id = cpu_to_be16(fl0id);
7293 c.fl1id = cpu_to_be16(fl1id);
7294 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7298 * t4_iq_free - free an ingress queue and its FLs
7299 * @adap: the adapter
7300 * @mbox: mailbox to use for the FW command
7301 * @pf: the PF owning the queues
7302 * @vf: the VF owning the queues
7303 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7304 * @iqid: ingress queue id
7305 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7306 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7308 * Frees an ingress queue and its associated FLs, if any.
7310 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7311 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7312 unsigned int fl0id, unsigned int fl1id)
7316 memset(&c, 0, sizeof(c));
7317 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7318 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7319 V_FW_IQ_CMD_VFN(vf));
7320 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7321 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7322 c.iqid = cpu_to_be16(iqid);
7323 c.fl0id = cpu_to_be16(fl0id);
7324 c.fl1id = cpu_to_be16(fl1id);
7325 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7329 * t4_eth_eq_free - free an Ethernet egress queue
7330 * @adap: the adapter
7331 * @mbox: mailbox to use for the FW command
7332 * @pf: the PF owning the queue
7333 * @vf: the VF owning the queue
7334 * @eqid: egress queue id
7336 * Frees an Ethernet egress queue.
7338 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7339 unsigned int vf, unsigned int eqid)
7341 struct fw_eq_eth_cmd c;
7343 memset(&c, 0, sizeof(c));
7344 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7345 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7346 V_FW_EQ_ETH_CMD_PFN(pf) |
7347 V_FW_EQ_ETH_CMD_VFN(vf));
7348 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7349 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7350 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7354 * t4_ctrl_eq_free - free a control egress queue
7355 * @adap: the adapter
7356 * @mbox: mailbox to use for the FW command
7357 * @pf: the PF owning the queue
7358 * @vf: the VF owning the queue
7359 * @eqid: egress queue id
7361 * Frees a control egress queue.
7363 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7364 unsigned int vf, unsigned int eqid)
7366 struct fw_eq_ctrl_cmd c;
7368 memset(&c, 0, sizeof(c));
7369 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7370 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7371 V_FW_EQ_CTRL_CMD_PFN(pf) |
7372 V_FW_EQ_CTRL_CMD_VFN(vf));
7373 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7374 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7375 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7379 * t4_ofld_eq_free - free an offload egress queue
7380 * @adap: the adapter
7381 * @mbox: mailbox to use for the FW command
7382 * @pf: the PF owning the queue
7383 * @vf: the VF owning the queue
7384 * @eqid: egress queue id
7386 * Frees a control egress queue.
7388 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7389 unsigned int vf, unsigned int eqid)
7391 struct fw_eq_ofld_cmd c;
7393 memset(&c, 0, sizeof(c));
7394 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7395 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7396 V_FW_EQ_OFLD_CMD_PFN(pf) |
7397 V_FW_EQ_OFLD_CMD_VFN(vf));
7398 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7399 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7400 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7404 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7405 * @link_down_rc: Link Down Reason Code
7407 * Returns a string representation of the Link Down Reason Code.
7409 const char *t4_link_down_rc_str(unsigned char link_down_rc)
7411 static const char *reason[] = {
7414 "Auto-negotiation Failure",
7416 "Insufficient Airflow",
7417 "Unable To Determine Reason",
7418 "No RX Signal Detected",
7422 if (link_down_rc >= ARRAY_SIZE(reason))
7423 return "Bad Reason Code";
7425 return reason[link_down_rc];
7429 * t4_handle_fw_rpl - process a FW reply message
7430 * @adap: the adapter
7431 * @rpl: start of the FW message
7433 * Processes a FW message, such as link state change messages.
7435 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7437 u8 opcode = *(const u8 *)rpl;
7438 const struct fw_port_cmd *p = (const void *)rpl;
7439 unsigned int action =
7440 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7442 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7443 /* link/module state change message */
7444 int speed = 0, fc = 0, i;
7445 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7446 struct port_info *pi = NULL;
7447 struct link_config *lc;
7448 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7449 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7450 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
7452 if (stat & F_FW_PORT_CMD_RXPAUSE)
7454 if (stat & F_FW_PORT_CMD_TXPAUSE)
7456 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7458 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7460 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7462 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7465 for_each_port(adap, i) {
7466 pi = adap2pinfo(adap, i);
7467 if (pi->tx_chan == chan)
7472 if (mod != pi->mod_type) {
7474 t4_os_portmod_changed(adap, i);
7476 if (link_ok != lc->link_ok || speed != lc->speed ||
7477 fc != lc->fc) { /* something changed */
7480 if (!link_ok && lc->link_ok)
7481 reason = G_FW_PORT_CMD_LINKDNRC(stat);
7485 lc->link_ok = link_ok;
7488 lc->supported = be16_to_cpu(p->u.info.pcap);
7489 t4_os_link_changed(adap, i, link_ok, reason);
7492 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7499 * get_pci_mode - determine a card's PCI mode
7500 * @adapter: the adapter
7501 * @p: where to store the PCI settings
7503 * Determines a card's PCI mode and associated parameters, such as speed
7506 static void get_pci_mode(struct adapter *adapter,
7507 struct pci_params *p)
7512 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7514 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7515 p->speed = val & PCI_EXP_LNKSTA_CLS;
7516 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7521 * init_link_config - initialize a link's SW state
7522 * @lc: structure holding the link state
7523 * @caps: link capabilities
7525 * Initializes the SW state maintained for each link, including the link's
7526 * capabilities and default speed/flow-control/autonegotiation settings.
7528 static void init_link_config(struct link_config *lc, unsigned int caps)
7530 lc->supported = caps;
7531 lc->requested_speed = 0;
7533 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7534 if (lc->supported & FW_PORT_CAP_ANEG) {
7535 lc->advertising = lc->supported & ADVERT_MASK;
7536 lc->autoneg = AUTONEG_ENABLE;
7537 lc->requested_fc |= PAUSE_AUTONEG;
7539 lc->advertising = 0;
7540 lc->autoneg = AUTONEG_DISABLE;
7545 u32 vendor_and_model_id;
7549 int t4_get_flash_params(struct adapter *adapter)
7552 * Table for non-Numonix supported flash parts. Numonix parts are left
7553 * to the preexisting well-tested code. All flash parts have 64KB
7556 static struct flash_desc supported_flash[] = {
7557 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7563 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7565 ret = sf1_read(adapter, 3, 0, 1, &info);
7566 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
7570 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7571 if (supported_flash[ret].vendor_and_model_id == info) {
7572 adapter->params.sf_size = supported_flash[ret].size_mb;
7573 adapter->params.sf_nsec =
7574 adapter->params.sf_size / SF_SEC_SIZE;
7578 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7580 info >>= 16; /* log2 of size */
7581 if (info >= 0x14 && info < 0x18)
7582 adapter->params.sf_nsec = 1 << (info - 16);
7583 else if (info == 0x18)
7584 adapter->params.sf_nsec = 64;
7587 adapter->params.sf_size = 1 << info;
7590 * We should ~probably~ reject adapters with FLASHes which are too
7591 * small but we have some legacy FPGAs with small FLASHes that we'd
7592 * still like to use. So instead we emit a scary message ...
7594 if (adapter->params.sf_size < FLASH_MIN_SIZE)
7595 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7596 adapter->params.sf_size, FLASH_MIN_SIZE);
7601 static void set_pcie_completion_timeout(struct adapter *adapter,
7607 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7609 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7612 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7616 const struct chip_params *t4_get_chip_params(int chipid)
7618 static const struct chip_params chip_params[] = {
7622 .pm_stats_cnt = PM_NSTATS,
7623 .cng_ch_bits_log = 2,
7625 .cim_num_obq = CIM_NUM_OBQ,
7626 .mps_rplc_size = 128,
7628 .sge_fl_db = F_DBPRIO,
7629 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7634 .pm_stats_cnt = PM_NSTATS,
7635 .cng_ch_bits_log = 2,
7637 .cim_num_obq = CIM_NUM_OBQ_T5,
7638 .mps_rplc_size = 128,
7640 .sge_fl_db = F_DBPRIO | F_DBTYPE,
7641 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7646 .pm_stats_cnt = T6_PM_NSTATS,
7647 .cng_ch_bits_log = 3,
7649 .cim_num_obq = CIM_NUM_OBQ_T5,
7650 .mps_rplc_size = 256,
7653 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7657 chipid -= CHELSIO_T4;
7658 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7661 return &chip_params[chipid];
7665 * t4_prep_adapter - prepare SW and HW for operation
7666 * @adapter: the adapter
7667 * @buf: temporary space of at least VPD_LEN size provided by the caller.
7669 * Initialize adapter SW state for the various HW modules, set initial
7670 * values for some adapter tunables, take PHYs out of reset, and
7671 * initialize the MDIO interface.
7673 int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7679 get_pci_mode(adapter, &adapter->params.pci);
7681 pl_rev = t4_read_reg(adapter, A_PL_REV);
7682 adapter->params.chipid = G_CHIPID(pl_rev);
7683 adapter->params.rev = G_REV(pl_rev);
7684 if (adapter->params.chipid == 0) {
7685 /* T4 did not have chipid in PL_REV (T5 onwards do) */
7686 adapter->params.chipid = CHELSIO_T4;
7688 /* T4A1 chip is not supported */
7689 if (adapter->params.rev == 1) {
7690 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7695 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
7696 if (adapter->chip_params == NULL)
7699 adapter->params.pci.vpd_cap_addr =
7700 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7702 ret = t4_get_flash_params(adapter);
7706 ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
7710 /* Cards with real ASICs have the chipid in the PCIe device id */
7711 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
7712 if (device_id >> 12 == chip_id(adapter))
7713 adapter->params.cim_la_size = CIMLA_SIZE;
7716 adapter->params.fpga = 1;
7717 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
7720 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7723 * Default port and clock for debugging in case we can't reach FW.
7725 adapter->params.nports = 1;
7726 adapter->params.portvec = 1;
7727 adapter->params.vpd.cclk = 50000;
7729 /* Set pci completion timeout value to 4 seconds. */
7730 set_pcie_completion_timeout(adapter, 0xd);
7735 * t4_shutdown_adapter - shut down adapter, host & wire
7736 * @adapter: the adapter
7738 * Perform an emergency shutdown of the adapter and stop it from
7739 * continuing any further communication on the ports or DMA to the
7740 * host. This is typically used when the adapter and/or firmware
7741 * have crashed and we want to prevent any further accidental
7742 * communication with the rest of the world. This will also force
7743 * the port Link Status to go down -- if register writes work --
7744 * which should help our peers figure out that we're down.
7746 int t4_shutdown_adapter(struct adapter *adapter)
7750 t4_intr_disable(adapter);
7751 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
7752 for_each_port(adapter, port) {
7753 u32 a_port_cfg = PORT_REG(port,
7758 t4_write_reg(adapter, a_port_cfg,
7759 t4_read_reg(adapter, a_port_cfg)
7760 & ~V_SIGNAL_DET(1));
7762 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7768 * t4_init_devlog_params - initialize adapter->params.devlog
7769 * @adap: the adapter
7770 * @fw_attach: whether we can talk to the firmware
7772 * Initialize various fields of the adapter's Firmware Device Log
7773 * Parameters structure.
7775 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
7777 struct devlog_params *dparams = &adap->params.devlog;
7779 unsigned int devlog_meminfo;
7780 struct fw_devlog_cmd devlog_cmd;
7783 /* If we're dealing with newer firmware, the Device Log Paramerters
7784 * are stored in a designated register which allows us to access the
7785 * Device Log even if we can't talk to the firmware.
7788 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
7790 unsigned int nentries, nentries128;
7792 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
7793 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
7795 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
7796 nentries = (nentries128 + 1) * 128;
7797 dparams->size = nentries * sizeof(struct fw_devlog_e);
7803 * For any failing returns ...
7805 memset(dparams, 0, sizeof *dparams);
7808 * If we can't talk to the firmware, there's really nothing we can do
7814 /* Otherwise, ask the firmware for it's Device Log Parameters.
7816 memset(&devlog_cmd, 0, sizeof devlog_cmd);
7817 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
7818 F_FW_CMD_REQUEST | F_FW_CMD_READ);
7819 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7820 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7826 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7827 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
7828 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
7829 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7835 * t4_init_sge_params - initialize adap->params.sge
7836 * @adapter: the adapter
7838 * Initialize various fields of the adapter's SGE Parameters structure.
7840 int t4_init_sge_params(struct adapter *adapter)
7843 struct sge_params *sp = &adapter->params.sge;
7846 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
7847 sp->counter_val[0] = G_THRESHOLD_0(r);
7848 sp->counter_val[1] = G_THRESHOLD_1(r);
7849 sp->counter_val[2] = G_THRESHOLD_2(r);
7850 sp->counter_val[3] = G_THRESHOLD_3(r);
7852 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
7853 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
7854 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
7855 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
7856 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
7857 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
7858 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
7859 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
7860 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
7862 r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
7863 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
7865 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
7867 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
7869 /* egress queues: log2 of # of doorbells per BAR2 page */
7870 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
7871 r >>= S_QUEUESPERPAGEPF0 +
7872 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7873 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
7875 /* ingress queues: log2 of # of doorbells per BAR2 page */
7876 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
7877 r >>= S_QUEUESPERPAGEPF0 +
7878 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
7879 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
7881 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
7882 r >>= S_HOSTPAGESIZEPF0 +
7883 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
7884 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
7886 r = t4_read_reg(adapter, A_SGE_CONTROL);
7887 sp->sge_control = r;
7888 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
7889 sp->fl_pktshift = G_PKTSHIFT(r);
7890 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
7892 sp->pack_boundary = sp->pad_boundary;
7894 r = t4_read_reg(adapter, A_SGE_CONTROL2);
7895 if (G_INGPACKBOUNDARY(r) == 0)
7896 sp->pack_boundary = 16;
7898 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
7900 for (i = 0; i < SGE_FLBUF_SIZES; i++)
7901 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
7902 A_SGE_FL_BUFFER_SIZE0 + (4 * i));
7908 * Read and cache the adapter's compressed filter mode and ingress config.
7910 static void read_filter_mode_and_ingress_config(struct adapter *adap)
7912 struct tp_params *tpp = &adap->params.tp;
7914 if (t4_use_ldst(adap)) {
7915 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
7916 A_TP_VLAN_PRI_MAP, 1);
7917 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
7918 A_TP_INGRESS_CONFIG, 1);
7920 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7921 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
7922 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
7923 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
7927 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7928 * shift positions of several elements of the Compressed Filter Tuple
7929 * for this adapter which we need frequently ...
7931 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
7932 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
7933 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
7934 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
7935 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
7936 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
7937 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
7938 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
7939 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
7940 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
7943 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7944 * represents the presense of an Outer VLAN instead of a VNIC ID.
7946 if ((tpp->ingress_config & F_VNIC) == 0)
7947 tpp->vnic_shift = -1;
7951 * t4_init_tp_params - initialize adap->params.tp
7952 * @adap: the adapter
7954 * Initialize various fields of the adapter's TP Parameters structure.
7956 int t4_init_tp_params(struct adapter *adap)
7960 struct tp_params *tpp = &adap->params.tp;
7962 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7963 tpp->tre = G_TIMERRESOLUTION(v);
7964 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7966 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7967 for (chan = 0; chan < MAX_NCHAN; chan++)
7968 tpp->tx_modq[chan] = chan;
7970 read_filter_mode_and_ingress_config(adap);
7973 * For T6, cache the adapter's compressed error vector
7974 * and passing outer header info for encapsulated packets.
7976 if (chip_id(adap) > CHELSIO_T5) {
7977 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
7978 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
7985 * t4_filter_field_shift - calculate filter field shift
7986 * @adap: the adapter
7987 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7989 * Return the shift position of a filter field within the Compressed
7990 * Filter Tuple. The filter field is specified via its selection bit
7991 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
7993 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7995 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7999 if ((filter_mode & filter_sel) == 0)
8002 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8003 switch (filter_mode & sel) {
8005 field_shift += W_FT_FCOE;
8008 field_shift += W_FT_PORT;
8011 field_shift += W_FT_VNIC_ID;
8014 field_shift += W_FT_VLAN;
8017 field_shift += W_FT_TOS;
8020 field_shift += W_FT_PROTOCOL;
8023 field_shift += W_FT_ETHERTYPE;
8026 field_shift += W_FT_MACMATCH;
8029 field_shift += W_FT_MPSHITTYPE;
8031 case F_FRAGMENTATION:
8032 field_shift += W_FT_FRAGMENTATION;
8039 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
8043 struct fw_port_cmd c;
8045 struct port_info *p = adap2pinfo(adap, port_id);
8048 memset(&c, 0, sizeof(c));
8050 for (i = 0, j = -1; i <= p->port_id; i++) {
8053 } while ((adap->params.portvec & (1 << j)) == 0);
8056 if (!(adap->flags & IS_VF) ||
8057 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
8058 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
8059 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8060 V_FW_PORT_CMD_PORTID(j));
8061 c.action_to_len16 = htonl(
8062 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8064 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8068 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
8069 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
8070 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
8071 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
8072 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
8074 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
8077 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
8081 p->vi[0].viid = ret;
8083 p->rx_chan_map = t4_get_mps_bg_map(adap, j);
8085 p->vi[0].rss_size = rss_size;
8086 t4_os_set_hw_addr(adap, p->port_id, addr);
8088 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8089 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
8090 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
8091 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val);
8093 p->vi[0].rss_base = 0xffff;
8095 /* MPASS((val >> 16) == rss_size); */
8096 p->vi[0].rss_base = val & 0xffff;
8103 * t4_read_cimq_cfg - read CIM queue configuration
8104 * @adap: the adapter
8105 * @base: holds the queue base addresses in bytes
8106 * @size: holds the queue sizes in bytes
8107 * @thres: holds the queue full thresholds in bytes
8109 * Returns the current configuration of the CIM queues, starting with
8110 * the IBQs, then the OBQs.
8112 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8115 int cim_num_obq = adap->chip_params->cim_num_obq;
8117 for (i = 0; i < CIM_NUM_IBQ; i++) {
8118 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8120 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8121 /* value is in 256-byte units */
8122 *base++ = G_CIMQBASE(v) * 256;
8123 *size++ = G_CIMQSIZE(v) * 256;
8124 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
8126 for (i = 0; i < cim_num_obq; i++) {
8127 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8129 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8130 /* value is in 256-byte units */
8131 *base++ = G_CIMQBASE(v) * 256;
8132 *size++ = G_CIMQSIZE(v) * 256;
8137 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8138 * @adap: the adapter
8139 * @qid: the queue index
8140 * @data: where to store the queue contents
8141 * @n: capacity of @data in 32-bit words
8143 * Reads the contents of the selected CIM queue starting at address 0 up
8144 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8145 * error and the number of 32-bit words actually read on success.
8147 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8149 int i, err, attempts;
8151 const unsigned int nwords = CIM_IBQ_SIZE * 4;
8153 if (qid > 5 || (n & 3))
8156 addr = qid * nwords;
8160 /* It might take 3-10ms before the IBQ debug read access is allowed.
8161 * Wait for 1 Sec with a delay of 1 usec.
8165 for (i = 0; i < n; i++, addr++) {
8166 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8168 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8172 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
8174 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8179 * t4_read_cim_obq - read the contents of a CIM outbound queue
8180 * @adap: the adapter
8181 * @qid: the queue index
8182 * @data: where to store the queue contents
8183 * @n: capacity of @data in 32-bit words
8185 * Reads the contents of the selected CIM queue starting at address 0 up
8186 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8187 * error and the number of 32-bit words actually read on success.
8189 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8192 unsigned int addr, v, nwords;
8193 int cim_num_obq = adap->chip_params->cim_num_obq;
8195 if ((qid > (cim_num_obq - 1)) || (n & 3))
8198 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8199 V_QUENUMSELECT(qid));
8200 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8202 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
8203 nwords = G_CIMQSIZE(v) * 64; /* same */
8207 for (i = 0; i < n; i++, addr++) {
8208 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8210 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8214 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
8216 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
8222 CIM_CTL_BASE = 0x2000,
8223 CIM_PBT_ADDR_BASE = 0x2800,
8224 CIM_PBT_LRF_BASE = 0x3000,
8225 CIM_PBT_DATA_BASE = 0x3800
8229 * t4_cim_read - read a block from CIM internal address space
8230 * @adap: the adapter
8231 * @addr: the start address within the CIM address space
8232 * @n: number of words to read
8233 * @valp: where to store the result
8235 * Reads a block of 4-byte words from the CIM intenal address space.
8237 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8242 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8245 for ( ; !ret && n--; addr += 4) {
8246 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8247 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8250 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8256 * t4_cim_write - write a block into CIM internal address space
8257 * @adap: the adapter
8258 * @addr: the start address within the CIM address space
8259 * @n: number of words to write
8260 * @valp: set of values to write
8262 * Writes a block of 4-byte words into the CIM intenal address space.
8264 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8265 const unsigned int *valp)
8269 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8272 for ( ; !ret && n--; addr += 4) {
8273 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8274 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8275 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8281 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8284 return t4_cim_write(adap, addr, 1, &val);
8288 * t4_cim_ctl_read - read a block from CIM control region
8289 * @adap: the adapter
8290 * @addr: the start address within the CIM control region
8291 * @n: number of words to read
8292 * @valp: where to store the result
8294 * Reads a block of 4-byte words from the CIM control region.
8296 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8299 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8303 * t4_cim_read_la - read CIM LA capture buffer
8304 * @adap: the adapter
8305 * @la_buf: where to store the LA data
8306 * @wrptr: the HW write pointer within the capture buffer
8308 * Reads the contents of the CIM LA buffer with the most recent entry at
8309 * the end of the returned data and with the entry at @wrptr first.
8310 * We try to leave the LA in the running state we find it in.
8312 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8315 unsigned int cfg, val, idx;
8317 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8321 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
8322 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8327 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8331 idx = G_UPDBGLAWRPTR(val);
8335 for (i = 0; i < adap->params.cim_la_size; i++) {
8336 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8337 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8340 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8343 if (val & F_UPDBGLARDEN) {
8347 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8351 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8352 idx = (idx + 1) & M_UPDBGLARDPTR;
8354 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8355 * identify the 32-bit portion of the full 312-bit data
8358 while ((idx & 0xf) > 9)
8359 idx = (idx + 1) % M_UPDBGLARDPTR;
8362 if (cfg & F_UPDBGLAEN) {
8363 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8364 cfg & ~F_UPDBGLARDEN);
8372 * t4_tp_read_la - read TP LA capture buffer
8373 * @adap: the adapter
8374 * @la_buf: where to store the LA data
8375 * @wrptr: the HW write pointer within the capture buffer
8377 * Reads the contents of the TP LA buffer with the most recent entry at
8378 * the end of the returned data and with the entry at @wrptr first.
8379 * We leave the LA in the running state we find it in.
8381 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8383 bool last_incomplete;
8384 unsigned int i, cfg, val, idx;
8386 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8387 if (cfg & F_DBGLAENABLE) /* freeze LA */
8388 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8389 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8391 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8392 idx = G_DBGLAWPTR(val);
8393 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8394 if (last_incomplete)
8395 idx = (idx + 1) & M_DBGLARPTR;
8400 val &= ~V_DBGLARPTR(M_DBGLARPTR);
8401 val |= adap->params.tp.la_mask;
8403 for (i = 0; i < TPLA_SIZE; i++) {
8404 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8405 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8406 idx = (idx + 1) & M_DBGLARPTR;
8409 /* Wipe out last entry if it isn't valid */
8410 if (last_incomplete)
8411 la_buf[TPLA_SIZE - 1] = ~0ULL;
8413 if (cfg & F_DBGLAENABLE) /* restore running state */
8414 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8415 cfg | adap->params.tp.la_mask);
8419 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8420 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8421 * state for more than the Warning Threshold then we'll issue a warning about
8422 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
8423 * appears to be hung every Warning Repeat second till the situation clears.
8424 * If the situation clears, we'll note that as well.
8426 #define SGE_IDMA_WARN_THRESH 1
8427 #define SGE_IDMA_WARN_REPEAT 300
8430 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8431 * @adapter: the adapter
8432 * @idma: the adapter IDMA Monitor state
8434 * Initialize the state of an SGE Ingress DMA Monitor.
8436 void t4_idma_monitor_init(struct adapter *adapter,
8437 struct sge_idma_monitor_state *idma)
8439 /* Initialize the state variables for detecting an SGE Ingress DMA
8440 * hang. The SGE has internal counters which count up on each clock
8441 * tick whenever the SGE finds its Ingress DMA State Engines in the
8442 * same state they were on the previous clock tick. The clock used is
8443 * the Core Clock so we have a limit on the maximum "time" they can
8444 * record; typically a very small number of seconds. For instance,
8445 * with a 600MHz Core Clock, we can only count up to a bit more than
8446 * 7s. So we'll synthesize a larger counter in order to not run the
8447 * risk of having the "timers" overflow and give us the flexibility to
8448 * maintain a Hung SGE State Machine of our own which operates across
8449 * a longer time frame.
8451 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8452 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8456 * t4_idma_monitor - monitor SGE Ingress DMA state
8457 * @adapter: the adapter
8458 * @idma: the adapter IDMA Monitor state
8459 * @hz: number of ticks/second
8460 * @ticks: number of ticks since the last IDMA Monitor call
8462 void t4_idma_monitor(struct adapter *adapter,
8463 struct sge_idma_monitor_state *idma,
8466 int i, idma_same_state_cnt[2];
8468 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8469 * are counters inside the SGE which count up on each clock when the
8470 * SGE finds its Ingress DMA State Engines in the same states they
8471 * were in the previous clock. The counters will peg out at
8472 * 0xffffffff without wrapping around so once they pass the 1s
8473 * threshold they'll stay above that till the IDMA state changes.
8475 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8476 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8477 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8479 for (i = 0; i < 2; i++) {
8480 u32 debug0, debug11;
8482 /* If the Ingress DMA Same State Counter ("timer") is less
8483 * than 1s, then we can reset our synthesized Stall Timer and
8484 * continue. If we have previously emitted warnings about a
8485 * potential stalled Ingress Queue, issue a note indicating
8486 * that the Ingress Queue has resumed forward progress.
8488 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8489 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8490 CH_WARN(adapter, "SGE idma%d, queue %u, "
8491 "resumed after %d seconds\n",
8492 i, idma->idma_qid[i],
8493 idma->idma_stalled[i]/hz);
8494 idma->idma_stalled[i] = 0;
8498 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8499 * domain. The first time we get here it'll be because we
8500 * passed the 1s Threshold; each additional time it'll be
8501 * because the RX Timer Callback is being fired on its regular
8504 * If the stall is below our Potential Hung Ingress Queue
8505 * Warning Threshold, continue.
8507 if (idma->idma_stalled[i] == 0) {
8508 idma->idma_stalled[i] = hz;
8509 idma->idma_warn[i] = 0;
8511 idma->idma_stalled[i] += ticks;
8512 idma->idma_warn[i] -= ticks;
8515 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8518 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8520 if (idma->idma_warn[i] > 0)
8522 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8524 /* Read and save the SGE IDMA State and Queue ID information.
8525 * We do this every time in case it changes across time ...
8526 * can't be too careful ...
8528 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8529 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8530 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8532 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8533 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8534 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8536 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8537 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8538 i, idma->idma_qid[i], idma->idma_state[i],
8539 idma->idma_stalled[i]/hz,
8541 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8546 * t4_read_pace_tbl - read the pace table
8547 * @adap: the adapter
8548 * @pace_vals: holds the returned values
8550 * Returns the values of TP's pace table in microseconds.
8552 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8556 for (i = 0; i < NTX_SCHED; i++) {
8557 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8558 v = t4_read_reg(adap, A_TP_PACE_TABLE);
8559 pace_vals[i] = dack_ticks_to_usec(adap, v);
8564 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8565 * @adap: the adapter
8566 * @sched: the scheduler index
8567 * @kbps: the byte rate in Kbps
8568 * @ipg: the interpacket delay in tenths of nanoseconds
8570 * Return the current configuration of a HW Tx scheduler.
8572 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8575 unsigned int v, addr, bpt, cpt;
8578 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8579 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8580 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8583 bpt = (v >> 8) & 0xff;
8586 *kbps = 0; /* scheduler disabled */
8588 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8589 *kbps = (v * bpt) / 125;
8593 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8594 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8595 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8599 *ipg = (10000 * v) / core_ticks_per_usec(adap);
8604 * t4_load_cfg - download config file
8605 * @adap: the adapter
8606 * @cfg_data: the cfg text file to write
8607 * @size: text file size
8609 * Write the supplied config text file to the card's serial flash.
8611 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8613 int ret, i, n, cfg_addr;
8615 unsigned int flash_cfg_start_sec;
8616 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8618 cfg_addr = t4_flash_cfg_addr(adap);
8623 flash_cfg_start_sec = addr / SF_SEC_SIZE;
8625 if (size > FLASH_CFG_MAX_SIZE) {
8626 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8627 FLASH_CFG_MAX_SIZE);
8631 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
8633 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8634 flash_cfg_start_sec + i - 1);
8636 * If size == 0 then we're simply erasing the FLASH sectors associated
8637 * with the on-adapter Firmware Configuration File.
8639 if (ret || size == 0)
8642 /* this will write to the flash up to SF_PAGE_SIZE at a time */
8643 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8644 if ( (size - i) < SF_PAGE_SIZE)
8648 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8652 addr += SF_PAGE_SIZE;
8653 cfg_data += SF_PAGE_SIZE;
8658 CH_ERR(adap, "config file %s failed %d\n",
8659 (size == 0 ? "clear" : "download"), ret);
8664 * t5_fw_init_extern_mem - initialize the external memory
8665 * @adap: the adapter
8667 * Initializes the external memory on T5.
8669 int t5_fw_init_extern_mem(struct adapter *adap)
8671 u32 params[1], val[1];
8677 val[0] = 0xff; /* Initialize all MCs */
8678 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8679 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8680 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8681 FW_CMD_MAX_TIMEOUT);
8686 /* BIOS boot headers */
8687 typedef struct pci_expansion_rom_header {
8688 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8689 u8 reserved[22]; /* Reserved per processor Architecture data */
8690 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8691 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8693 /* Legacy PCI Expansion ROM Header */
8694 typedef struct legacy_pci_expansion_rom_header {
8695 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
8696 u8 size512; /* Current Image Size in units of 512 bytes */
8697 u8 initentry_point[4];
8698 u8 cksum; /* Checksum computed on the entire Image */
8699 u8 reserved[16]; /* Reserved */
8700 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
8701 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8703 /* EFI PCI Expansion ROM Header */
8704 typedef struct efi_pci_expansion_rom_header {
8705 u8 signature[2]; // ROM signature. The value 0xaa55
8706 u8 initialization_size[2]; /* Units 512. Includes this header */
8707 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
8708 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
8709 u8 efi_machine_type[2]; /* Machine type from EFI image header */
8710 u8 compression_type[2]; /* Compression type. */
8712 * Compression type definition
8715 * 0x2-0xFFFF: Reserved
8717 u8 reserved[8]; /* Reserved */
8718 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
8719 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
8720 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8722 /* PCI Data Structure Format */
8723 typedef struct pcir_data_structure { /* PCI Data Structure */
8724 u8 signature[4]; /* Signature. The string "PCIR" */
8725 u8 vendor_id[2]; /* Vendor Identification */
8726 u8 device_id[2]; /* Device Identification */
8727 u8 vital_product[2]; /* Pointer to Vital Product Data */
8728 u8 length[2]; /* PCIR Data Structure Length */
8729 u8 revision; /* PCIR Data Structure Revision */
8730 u8 class_code[3]; /* Class Code */
8731 u8 image_length[2]; /* Image Length. Multiple of 512B */
8732 u8 code_revision[2]; /* Revision Level of Code/Data */
8733 u8 code_type; /* Code Type. */
8735 * PCI Expansion ROM Code Types
8736 * 0x00: Intel IA-32, PC-AT compatible. Legacy
8737 * 0x01: Open Firmware standard for PCI. FCODE
8738 * 0x02: Hewlett-Packard PA RISC. HP reserved
8739 * 0x03: EFI Image. EFI
8740 * 0x04-0xFF: Reserved.
8742 u8 indicator; /* Indicator. Identifies the last image in the ROM */
8743 u8 reserved[2]; /* Reserved */
8744 } pcir_data_t; /* PCI__DATA_STRUCTURE */
8746 /* BOOT constants */
8748 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
8749 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
8750 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
8751 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
8752 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
8753 VENDOR_ID = 0x1425, /* Vendor ID */
8754 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
8758 * modify_device_id - Modifies the device ID of the Boot BIOS image
8759 * @adatper: the device ID to write.
8760 * @boot_data: the boot image to modify.
8762 * Write the supplied device ID to the boot BIOS image.
8764 static void modify_device_id(int device_id, u8 *boot_data)
8766 legacy_pci_exp_rom_header_t *header;
8767 pcir_data_t *pcir_header;
8771 * Loop through all chained images and change the device ID's
8774 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8775 pcir_header = (pcir_data_t *) &boot_data[cur_header +
8776 le16_to_cpu(*(u16*)header->pcir_offset)];
8779 * Only modify the Device ID if code type is Legacy or HP.
8780 * 0x00: Okay to modify
8781 * 0x01: FCODE. Do not be modify
8782 * 0x03: Okay to modify
8783 * 0x04-0xFF: Do not modify
8785 if (pcir_header->code_type == 0x00) {
8790 * Modify Device ID to match current adatper
8792 *(u16*) pcir_header->device_id = device_id;
8795 * Set checksum temporarily to 0.
8796 * We will recalculate it later.
8798 header->cksum = 0x0;
8801 * Calculate and update checksum
8803 for (i = 0; i < (header->size512 * 512); i++)
8804 csum += (u8)boot_data[cur_header + i];
8807 * Invert summed value to create the checksum
8808 * Writing new checksum value directly to the boot data
8810 boot_data[cur_header + 7] = -csum;
8812 } else if (pcir_header->code_type == 0x03) {
8815 * Modify Device ID to match current adatper
8817 *(u16*) pcir_header->device_id = device_id;
8823 * Check indicator element to identify if this is the last
8826 if (pcir_header->indicator & 0x80)
8830 * Move header pointer up to the next image in the ROM.
8832 cur_header += header->size512 * 512;
8837 * t4_load_boot - download boot flash
8838 * @adapter: the adapter
8839 * @boot_data: the boot image to write
8840 * @boot_addr: offset in flash to write boot_data
8843 * Write the supplied boot image to the card's serial flash.
8844 * The boot image has the following sections: a 28-byte header and the
8847 int t4_load_boot(struct adapter *adap, u8 *boot_data,
8848 unsigned int boot_addr, unsigned int size)
8850 pci_exp_rom_header_t *header;
8852 pcir_data_t *pcir_header;
8856 unsigned int boot_sector = (boot_addr * 1024 );
8857 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8860 * Make sure the boot image does not encroach on the firmware region
8862 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8863 CH_ERR(adap, "boot image encroaching on firmware region\n");
8868 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8869 * and Boot configuration data sections. These 3 boot sections span
8870 * sectors 0 to 7 in flash and live right before the FW image location.
8872 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8874 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8875 (boot_sector >> 16) + i - 1);
8878 * If size == 0 then we're simply erasing the FLASH sectors associated
8879 * with the on-adapter option ROM file
8881 if (ret || (size == 0))
8884 /* Get boot header */
8885 header = (pci_exp_rom_header_t *)boot_data;
8886 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8887 /* PCIR Data Structure */
8888 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8891 * Perform some primitive sanity testing to avoid accidentally
8892 * writing garbage over the boot sectors. We ought to check for
8893 * more but it's not worth it for now ...
8895 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8896 CH_ERR(adap, "boot image too small/large\n");
8900 #ifndef CHELSIO_T4_DIAGS
8902 * Check BOOT ROM header signature
8904 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8905 CH_ERR(adap, "Boot image missing signature\n");
8910 * Check PCI header signature
8912 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8913 CH_ERR(adap, "PCI header missing signature\n");
8918 * Check Vendor ID matches Chelsio ID
8920 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8921 CH_ERR(adap, "Vendor ID missing signature\n");
8927 * Retrieve adapter's device ID
8929 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8930 /* Want to deal with PF 0 so I strip off PF 4 indicator */
8931 device_id = device_id & 0xf0ff;
8934 * Check PCIE Device ID
8936 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8938 * Change the device ID in the Boot BIOS image to match
8939 * the Device ID of the current adapter.
8941 modify_device_id(device_id, boot_data);
8945 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8946 * we finish copying the rest of the boot image. This will ensure
8947 * that the BIOS boot header will only be written if the boot image
8948 * was written in full.
8951 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8952 addr += SF_PAGE_SIZE;
8953 boot_data += SF_PAGE_SIZE;
8954 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
8959 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8960 (const u8 *)header, 0);
8964 CH_ERR(adap, "boot image download failed, error %d\n", ret);
8969 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
8970 * @adapter: the adapter
8972 * Return the address within the flash where the OptionROM Configuration
8973 * is stored, or an error if the device FLASH is too small to contain
8974 * a OptionROM Configuration.
8976 static int t4_flash_bootcfg_addr(struct adapter *adapter)
8979 * If the device FLASH isn't large enough to hold a Firmware
8980 * Configuration File, return an error.
8982 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
8985 return FLASH_BOOTCFG_START;
8988 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
8990 int ret, i, n, cfg_addr;
8992 unsigned int flash_cfg_start_sec;
8993 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8995 cfg_addr = t4_flash_bootcfg_addr(adap);
9000 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9002 if (size > FLASH_BOOTCFG_MAX_SIZE) {
9003 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9004 FLASH_BOOTCFG_MAX_SIZE);
9008 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9010 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9011 flash_cfg_start_sec + i - 1);
9014 * If size == 0 then we're simply erasing the FLASH sectors associated
9015 * with the on-adapter OptionROM Configuration File.
9017 if (ret || size == 0)
9020 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9021 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9022 if ( (size - i) < SF_PAGE_SIZE)
9026 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9030 addr += SF_PAGE_SIZE;
9031 cfg_data += SF_PAGE_SIZE;
9036 CH_ERR(adap, "boot config data %s failed %d\n",
9037 (size == 0 ? "clear" : "download"), ret);
9042 * t4_set_filter_mode - configure the optional components of filter tuples
9043 * @adap: the adapter
9044 * @mode_map: a bitmap selcting which optional filter components to enable
9046 * Sets the filter mode by selecting the optional components to enable
9047 * in filter tuples. Returns 0 on success and a negative error if the
9048 * requested mode needs more bits than are available for optional
9051 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
9053 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
9057 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9058 if (mode_map & (1 << i))
9060 if (nbits > FILTER_OPT_LEN)
9062 if (t4_use_ldst(adap))
9063 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
9065 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
9066 1, A_TP_VLAN_PRI_MAP);
9067 read_filter_mode_and_ingress_config(adap);
9073 * t4_clr_port_stats - clear port statistics
9074 * @adap: the adapter
9075 * @idx: the port index
9077 * Clear HW statistics for the given port.
9079 void t4_clr_port_stats(struct adapter *adap, int idx)
9082 u32 bgmap = t4_get_mps_bg_map(adap, idx);
9086 port_base_addr = PORT_BASE(idx);
9088 port_base_addr = T5_PORT_BASE(idx);
9090 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9091 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9092 t4_write_reg(adap, port_base_addr + i, 0);
9093 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9094 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9095 t4_write_reg(adap, port_base_addr + i, 0);
9096 for (i = 0; i < 4; i++)
9097 if (bgmap & (1 << i)) {
9099 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9101 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9106 * t4_i2c_rd - read I2C data from adapter
9107 * @adap: the adapter
9108 * @port: Port number if per-port device; <0 if not
9109 * @devid: per-port device ID or absolute device ID
9110 * @offset: byte offset into device I2C space
9111 * @len: byte length of I2C space data
9112 * @buf: buffer in which to return I2C data
9114 * Reads the I2C data from the indicated device and location.
9116 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9117 int port, unsigned int devid,
9118 unsigned int offset, unsigned int len,
9122 struct fw_ldst_cmd ldst;
9128 len > sizeof ldst.u.i2c.data)
9131 memset(&ldst, 0, sizeof ldst);
9132 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9133 ldst.op_to_addrspace =
9134 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9138 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9139 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9140 ldst.u.i2c.did = devid;
9141 ldst.u.i2c.boffset = offset;
9142 ldst.u.i2c.blen = len;
9143 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9145 memcpy(buf, ldst.u.i2c.data, len);
9150 * t4_i2c_wr - write I2C data to adapter
9151 * @adap: the adapter
9152 * @port: Port number if per-port device; <0 if not
9153 * @devid: per-port device ID or absolute device ID
9154 * @offset: byte offset into device I2C space
9155 * @len: byte length of I2C space data
9156 * @buf: buffer containing new I2C data
9158 * Write the I2C data to the indicated device and location.
9160 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9161 int port, unsigned int devid,
9162 unsigned int offset, unsigned int len,
9166 struct fw_ldst_cmd ldst;
9171 len > sizeof ldst.u.i2c.data)
9174 memset(&ldst, 0, sizeof ldst);
9175 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9176 ldst.op_to_addrspace =
9177 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9181 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9182 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9183 ldst.u.i2c.did = devid;
9184 ldst.u.i2c.boffset = offset;
9185 ldst.u.i2c.blen = len;
9186 memcpy(ldst.u.i2c.data, buf, len);
9187 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9191 * t4_sge_ctxt_rd - read an SGE context through FW
9192 * @adap: the adapter
9193 * @mbox: mailbox to use for the FW command
9194 * @cid: the context id
9195 * @ctype: the context type
9196 * @data: where to store the context data
9198 * Issues a FW command through the given mailbox to read an SGE context.
9200 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9201 enum ctxt_type ctype, u32 *data)
9204 struct fw_ldst_cmd c;
9206 if (ctype == CTXT_EGRESS)
9207 ret = FW_LDST_ADDRSPC_SGE_EGRC;
9208 else if (ctype == CTXT_INGRESS)
9209 ret = FW_LDST_ADDRSPC_SGE_INGC;
9210 else if (ctype == CTXT_FLM)
9211 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9213 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9215 memset(&c, 0, sizeof(c));
9216 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9217 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9218 V_FW_LDST_CMD_ADDRSPACE(ret));
9219 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9220 c.u.idctxt.physid = cpu_to_be32(cid);
9222 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9224 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9225 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9226 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9227 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9228 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9229 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9235 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9236 * @adap: the adapter
9237 * @cid: the context id
9238 * @ctype: the context type
9239 * @data: where to store the context data
9241 * Reads an SGE context directly, bypassing FW. This is only for
9242 * debugging when FW is unavailable.
9244 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9249 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9250 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9252 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9253 *data++ = t4_read_reg(adap, i);
9257 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9260 struct fw_sched_cmd cmd;
9262 memset(&cmd, 0, sizeof(cmd));
9263 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9266 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9268 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9269 cmd.u.config.type = type;
9270 cmd.u.config.minmaxen = minmaxen;
9272 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9276 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9277 int rateunit, int ratemode, int channel, int cl,
9278 int minrate, int maxrate, int weight, int pktsize,
9281 struct fw_sched_cmd cmd;
9283 memset(&cmd, 0, sizeof(cmd));
9284 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9287 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9289 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9290 cmd.u.params.type = type;
9291 cmd.u.params.level = level;
9292 cmd.u.params.mode = mode;
9293 cmd.u.params.ch = channel;
9294 cmd.u.params.cl = cl;
9295 cmd.u.params.unit = rateunit;
9296 cmd.u.params.rate = ratemode;
9297 cmd.u.params.min = cpu_to_be32(minrate);
9298 cmd.u.params.max = cpu_to_be32(maxrate);
9299 cmd.u.params.weight = cpu_to_be16(weight);
9300 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9302 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9307 * t4_config_watchdog - configure (enable/disable) a watchdog timer
9308 * @adapter: the adapter
9309 * @mbox: mailbox to use for the FW command
9310 * @pf: the PF owning the queue
9311 * @vf: the VF owning the queue
9312 * @timeout: watchdog timeout in ms
9313 * @action: watchdog timer / action
9315 * There are separate watchdog timers for each possible watchdog
9316 * action. Configure one of the watchdog timers by setting a non-zero
9317 * timeout. Disable a watchdog timer by using a timeout of zero.
9319 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9320 unsigned int pf, unsigned int vf,
9321 unsigned int timeout, unsigned int action)
9323 struct fw_watchdog_cmd wdog;
9327 * The watchdog command expects a timeout in units of 10ms so we need
9328 * to convert it here (via rounding) and force a minimum of one 10ms
9329 * "tick" if the timeout is non-zero but the convertion results in 0
9332 ticks = (timeout + 5)/10;
9333 if (timeout && !ticks)
9336 memset(&wdog, 0, sizeof wdog);
9337 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9340 V_FW_PARAMS_CMD_PFN(pf) |
9341 V_FW_PARAMS_CMD_VFN(vf));
9342 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9343 wdog.timeout = cpu_to_be32(ticks);
9344 wdog.action = cpu_to_be32(action);
9346 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9349 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9351 struct fw_devlog_cmd devlog_cmd;
9354 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9355 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9356 F_FW_CMD_REQUEST | F_FW_CMD_READ);
9357 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9358 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9359 sizeof(devlog_cmd), &devlog_cmd);
9363 *level = devlog_cmd.level;
9367 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9369 struct fw_devlog_cmd devlog_cmd;
9371 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9372 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9375 devlog_cmd.level = level;
9376 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9377 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9378 sizeof(devlog_cmd), &devlog_cmd);