2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
37 #include "t4_regs_values.h"
38 #include "firmware/t4fw_interface.h"
/*
 * msleep(x): sleep for roughly @x milliseconds.  Spin with DELAY() while the
 * kernel is cold (scheduler not running yet), otherwise pause() the thread.
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
49 * t4_wait_op_done_val - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 * @valp: where to store the value of the register at completion time
58 * Wait until an operation is completed by checking a bit in a register
59 * up to @attempts times. If @valp is not NULL the value of the register
60 * at the time it indicated completion is stored there. Returns 0 if the
61 * operation completes and -EAGAIN otherwise.
63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64 int polarity, int attempts, int delay, u32 *valp)
67 u32 val = t4_read_reg(adapter, reg);
69 if (!!(val & mask) == polarity) {
81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
82 int polarity, int attempts, int delay)
84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
89 * t4_set_reg_field - set a register field to a value
90 * @adapter: the adapter to program
91 * @addr: the register address
92 * @mask: specifies the portion of the register to modify
93 * @val: the new value for the register field
95 * Sets a register field specified by the supplied mask to the
98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
101 u32 v = t4_read_reg(adapter, addr) & ~mask;
103 t4_write_reg(adapter, addr, v | val);
104 (void) t4_read_reg(adapter, addr); /* flush */
108 * t4_read_indirect - read indirectly addressed registers
110 * @addr_reg: register holding the indirect address
111 * @data_reg: register holding the value of the indirect register
112 * @vals: where the read register values are stored
113 * @nregs: how many indirect registers to read
114 * @start_idx: index of first indirect register to read
116 * Reads registers that are accessed indirectly through an address/data
119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 unsigned int data_reg, u32 *vals,
121 unsigned int nregs, unsigned int start_idx)
124 t4_write_reg(adap, addr_reg, start_idx);
125 *vals++ = t4_read_reg(adap, data_reg);
131 * t4_write_indirect - write indirectly addressed registers
133 * @addr_reg: register holding the indirect addresses
134 * @data_reg: register holding the value for the indirect registers
135 * @vals: values to write
136 * @nregs: how many indirect registers to write
137 * @start_idx: address of first indirect register to write
139 * Writes a sequential block of registers that are accessed indirectly
140 * through an address/data register pair.
142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
143 unsigned int data_reg, const u32 *vals,
144 unsigned int nregs, unsigned int start_idx)
147 t4_write_reg(adap, addr_reg, start_idx++);
148 t4_write_reg(adap, data_reg, *vals++);
153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
154 * mechanism. This guarantees that we get the real value even if we're
155 * operating within a Virtual Machine and the Hypervisor is trapping our
156 * Configuration Space accesses.
158 * N.B. This routine should only be used as a last resort: the firmware uses
159 * the backdoor registers on a regular basis and we can end up
160 * conflicting with it's uses!
162 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
167 if (chip_id(adap) <= CHELSIO_T5)
175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
180 * Configuration Space read. (None of the other fields matter when
181 * F_ENABLE is 0 so a simple register write is easier than a
182 * read-modify-write via t4_set_reg_field().)
184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
190 * t4_report_fw_error - report firmware error
193 * The adapter firmware can indicate error conditions to the host.
194 * If the firmware has indicated an error, print out the reason for
195 * the firmware error.
197 static void t4_report_fw_error(struct adapter *adap)
199 static const char *const reason[] = {
200 "Crash", /* PCIE_FW_EVAL_CRASH */
201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
207 "Reserved", /* reserved */
211 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
212 if (pcie_fw & F_PCIE_FW_ERR)
213 CH_ERR(adap, "Firmware reports adapter error: %s\n",
214 reason[G_PCIE_FW_EVAL(pcie_fw)]);
218 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
220 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
223 for ( ; nflit; nflit--, mbox_addr += 8)
224 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
228 * Handle a FW assertion reported in a mailbox.
230 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
233 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
234 asrt->u.assert.filename_0_7,
235 be32_to_cpu(asrt->u.assert.line),
236 be32_to_cpu(asrt->u.assert.x),
237 be32_to_cpu(asrt->u.assert.y));
240 #define X_CIM_PF_NOACCESS 0xeeeeeeee
242 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
244 * @mbox: index of the mailbox to use
245 * @cmd: the command to write
246 * @size: command length in bytes
247 * @rpl: where to optionally store the reply
248 * @sleep_ok: if true we may sleep while awaiting command completion
249 * @timeout: time to wait for command to finish before timing out
250 * (negative implies @sleep_ok=false)
252 * Sends the given command to FW through the selected mailbox and waits
253 * for the FW to execute the command. If @rpl is not %NULL it is used to
254 * store the FW's reply to the command. The command and its optional
255 * reply are of the same length. Some FW commands like RESET and
256 * INITIALIZE can take a considerable amount of time to execute.
257 * @sleep_ok determines whether we may sleep while awaiting the response.
258 * If sleeping is allowed we use progressive backoff otherwise we spin.
259 * Note that passing in a negative @timeout is an alternate mechanism
260 * for specifying @sleep_ok=false. This is useful when a higher level
261 * interface allows for specification of @timeout but not @sleep_ok ...
263 * The return value is 0 on success or a negative errno on failure. A
264 * failure can happen either because we are not able to execute the
265 * command or FW executes it but signals an error. In the latter case
266 * the return value is the error code indicated by FW (negated).
268 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
269 int size, void *rpl, bool sleep_ok, int timeout)
272 * We delay in small increments at first in an effort to maintain
273 * responsiveness for simple, fast executing commands but then back
274 * off to larger delays to a maximum retry delay.
276 static const int delay[] = {
277 1, 1, 3, 5, 10, 10, 20, 50, 100
281 int i, ms, delay_idx, ret;
282 const __be64 *p = cmd;
283 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
284 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
286 __be64 cmd_rpl[MBOX_LEN/8];
289 if (adap->flags & CHK_MBOX_ACCESS)
290 ASSERT_SYNCHRONIZED_OP(adap);
292 if ((size & 15) || size > MBOX_LEN)
295 if (adap->flags & IS_VF) {
297 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
299 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
300 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
304 * If we have a negative timeout, that implies that we can't sleep.
312 * Attempt to gain access to the mailbox.
314 for (i = 0; i < 4; i++) {
315 ctl = t4_read_reg(adap, ctl_reg);
317 if (v != X_MBOWNER_NONE)
322 * If we were unable to gain access, dequeue ourselves from the
323 * mailbox atomic access list and report the error to our caller.
325 if (v != X_MBOWNER_PL) {
326 t4_report_fw_error(adap);
327 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
332 * If we gain ownership of the mailbox and there's a "valid" message
333 * in it, this is likely an asynchronous error message from the
334 * firmware. So we'll report that and then proceed on with attempting
335 * to issue our own command ... which may well fail if the error
336 * presaged the firmware crashing ...
338 if (ctl & F_MBMSGVALID) {
339 CH_ERR(adap, "found VALID command in mbox %u: %016llx %016llx "
340 "%016llx %016llx %016llx %016llx %016llx %016llx\n",
341 mbox, (unsigned long long)t4_read_reg64(adap, data_reg),
342 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
343 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
344 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
345 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
346 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
347 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
348 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
352 * Copy in the new mailbox command and send it on its way ...
354 for (i = 0; i < size; i += 8, p++)
355 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
357 if (adap->flags & IS_VF) {
359 * For the VFs, the Mailbox Data "registers" are
360 * actually backed by T4's "MA" interface rather than
361 * PL Registers (as is the case for the PFs). Because
362 * these are in different coherency domains, the write
363 * to the VF's PL-register-backed Mailbox Control can
364 * race in front of the writes to the MA-backed VF
365 * Mailbox Data "registers". So we need to do a
366 * read-back on at least one byte of the VF Mailbox
367 * Data registers before doing the write to the VF
368 * Mailbox Control register.
370 t4_read_reg(adap, data_reg);
373 CH_DUMP_MBOX(adap, mbox, data_reg);
375 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
376 t4_read_reg(adap, ctl_reg); /* flush write */
382 * Loop waiting for the reply; bail out if we time out or the firmware
386 for (i = 0; i < timeout; i += ms) {
387 if (!(adap->flags & IS_VF)) {
388 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
389 if (pcie_fw & F_PCIE_FW_ERR)
393 ms = delay[delay_idx]; /* last element may repeat */
394 if (delay_idx < ARRAY_SIZE(delay) - 1)
401 v = t4_read_reg(adap, ctl_reg);
402 if (v == X_CIM_PF_NOACCESS)
404 if (G_MBOWNER(v) == X_MBOWNER_PL) {
405 if (!(v & F_MBMSGVALID)) {
406 t4_write_reg(adap, ctl_reg,
407 V_MBOWNER(X_MBOWNER_NONE));
412 * Retrieve the command reply and release the mailbox.
414 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
415 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
417 CH_DUMP_MBOX(adap, mbox, data_reg);
419 res = be64_to_cpu(cmd_rpl[0]);
420 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
421 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
422 res = V_FW_CMD_RETVAL(EIO);
424 memcpy(rpl, cmd_rpl, size);
425 return -G_FW_CMD_RETVAL((int)res);
430 * We timed out waiting for a reply to our mailbox command. Report
431 * the error and also check to see if the firmware reported any
434 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
435 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
436 *(const u8 *)cmd, mbox);
438 /* If DUMP_MBOX is set the mbox has already been dumped */
439 if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
441 CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
442 "%016llx %016llx %016llx %016llx\n",
443 (unsigned long long)be64_to_cpu(p[0]),
444 (unsigned long long)be64_to_cpu(p[1]),
445 (unsigned long long)be64_to_cpu(p[2]),
446 (unsigned long long)be64_to_cpu(p[3]),
447 (unsigned long long)be64_to_cpu(p[4]),
448 (unsigned long long)be64_to_cpu(p[5]),
449 (unsigned long long)be64_to_cpu(p[6]),
450 (unsigned long long)be64_to_cpu(p[7]));
453 t4_report_fw_error(adap);
458 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
459 void *rpl, bool sleep_ok)
461 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
462 sleep_ok, FW_CMD_MAX_TIMEOUT);
466 static int t4_edc_err_read(struct adapter *adap, int idx)
468 u32 edc_ecc_err_addr_reg;
469 u32 edc_bist_status_rdata_reg;
472 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
475 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
476 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
480 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
481 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
484 "edc%d err addr 0x%x: 0x%x.\n",
485 idx, edc_ecc_err_addr_reg,
486 t4_read_reg(adap, edc_ecc_err_addr_reg));
488 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
489 edc_bist_status_rdata_reg,
490 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
491 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
492 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
493 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
494 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
495 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
496 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
497 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
498 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
504 * t4_mc_read - read from MC through backdoor accesses
506 * @idx: which MC to access
507 * @addr: address of first byte requested
508 * @data: 64 bytes of data containing the requested address
509 * @ecc: where to store the corresponding 64-bit ECC word
511 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
512 * that covers the requested address @addr. If @parity is not %NULL it
513 * is assigned the 64-bit ECC word for the read data.
515 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
518 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
519 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
522 mc_bist_cmd_reg = A_MC_BIST_CMD;
523 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
524 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
525 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
526 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
528 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
529 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
530 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
531 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
533 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
537 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
539 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
540 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
541 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
542 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
543 F_START_BIST | V_BIST_CMD_GAP(1));
544 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
548 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
550 for (i = 15; i >= 0; i--)
551 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
553 *ecc = t4_read_reg64(adap, MC_DATA(16));
559 * t4_edc_read - read from EDC through backdoor accesses
561 * @idx: which EDC to access
562 * @addr: address of first byte requested
563 * @data: 64 bytes of data containing the requested address
564 * @ecc: where to store the corresponding 64-bit ECC word
566 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
567 * that covers the requested address @addr. If @parity is not %NULL it
568 * is assigned the 64-bit ECC word for the read data.
570 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
573 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
574 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
577 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
578 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
579 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
580 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
582 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
586 * These macro are missing in t4_regs.h file.
587 * Added temporarily for testing.
589 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
590 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
591 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
592 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
593 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
594 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
596 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
602 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
604 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
605 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
606 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
607 t4_write_reg(adap, edc_bist_cmd_reg,
608 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
609 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
613 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
615 for (i = 15; i >= 0; i--)
616 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
618 *ecc = t4_read_reg64(adap, EDC_DATA(16));
624 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
626 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
627 * @addr: address within indicated memory type
628 * @len: amount of memory to read
629 * @buf: host memory buffer
631 * Reads an [almost] arbitrary memory region in the firmware: the
632 * firmware memory address, length and host buffer must be aligned on
633 * 32-bit boudaries. The memory is returned as a raw byte sequence from
634 * the firmware's memory. If this memory contains data structures which
635 * contain multi-byte integers, it's the callers responsibility to
636 * perform appropriate byte order conversions.
638 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
641 u32 pos, start, end, offset;
645 * Argument sanity checks ...
647 if ((addr & 0x3) || (len & 0x3))
651 * The underlaying EDC/MC read routines read 64 bytes at a time so we
652 * need to round down the start and round up the end. We'll start
653 * copying out of the first line at (addr - start) a word at a time.
655 start = rounddown2(addr, 64);
656 end = roundup2(addr + len, 64);
657 offset = (addr - start)/sizeof(__be32);
659 for (pos = start; pos < end; pos += 64, offset = 0) {
663 * Read the chip's memory block and bail if there's an error.
665 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
666 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
668 ret = t4_edc_read(adap, mtype, pos, data, NULL);
673 * Copy the data into the caller's memory buffer.
675 while (offset < 16 && len > 0) {
676 *buf++ = data[offset++];
677 len -= sizeof(__be32);
685 * Return the specified PCI-E Configuration Space register from our Physical
686 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
687 * since we prefer to let the firmware own all of these registers, but if that
688 * fails we go for it directly ourselves.
690 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
694 * If fw_attach != 0, construct and send the Firmware LDST Command to
695 * retrieve the specified PCI-E Configuration Space register.
697 if (drv_fw_attach != 0) {
698 struct fw_ldst_cmd ldst_cmd;
701 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
702 ldst_cmd.op_to_addrspace =
703 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
706 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
707 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
708 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
709 ldst_cmd.u.pcie.ctrl_to_fn =
710 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
711 ldst_cmd.u.pcie.r = reg;
714 * If the LDST Command succeeds, return the result, otherwise
715 * fall through to reading it directly ourselves ...
717 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
720 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
722 CH_WARN(adap, "Firmware failed to return "
723 "Configuration Space register %d, err = %d\n",
728 * Read the desired Configuration Space register via the PCI-E
729 * Backdoor mechanism.
731 return t4_hw_pci_read_cfg4(adap, reg);
735 * t4_get_regs_len - return the size of the chips register set
736 * @adapter: the adapter
738 * Returns the size of the chip's BAR0 register space.
740 unsigned int t4_get_regs_len(struct adapter *adapter)
742 unsigned int chip_version = chip_id(adapter);
744 switch (chip_version) {
746 if (adapter->flags & IS_VF)
747 return FW_T4VF_REGMAP_SIZE;
748 return T4_REGMAP_SIZE;
752 if (adapter->flags & IS_VF)
753 return FW_T4VF_REGMAP_SIZE;
754 return T5_REGMAP_SIZE;
758 "Unsupported chip version %d\n", chip_version);
763 * t4_get_regs - read chip registers into provided buffer
765 * @buf: register buffer
766 * @buf_size: size (in bytes) of register buffer
768 * If the provided register buffer isn't large enough for the chip's
769 * full register range, the register dump will be truncated to the
770 * register buffer's size.
772 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
774 static const unsigned int t4_reg_ranges[] = {
1233 static const unsigned int t4vf_reg_ranges[] = {
1234 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1235 VF_MPS_REG(A_MPS_VF_CTL),
1236 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1237 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1238 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1239 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1240 FW_T4VF_MBDATA_BASE_ADDR,
1241 FW_T4VF_MBDATA_BASE_ADDR +
1242 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1245 static const unsigned int t5_reg_ranges[] = {
2012 static const unsigned int t5vf_reg_ranges[] = {
2013 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2014 VF_MPS_REG(A_MPS_VF_CTL),
2015 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2016 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2017 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2018 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2019 FW_T4VF_MBDATA_BASE_ADDR,
2020 FW_T4VF_MBDATA_BASE_ADDR +
2021 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2024 static const unsigned int t6_reg_ranges[] = {
2585 static const unsigned int t6vf_reg_ranges[] = {
2586 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2587 VF_MPS_REG(A_MPS_VF_CTL),
2588 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2589 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2590 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2591 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2592 FW_T6VF_MBDATA_BASE_ADDR,
2593 FW_T6VF_MBDATA_BASE_ADDR +
2594 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2597 u32 *buf_end = (u32 *)(buf + buf_size);
2598 const unsigned int *reg_ranges;
2599 int reg_ranges_size, range;
2600 unsigned int chip_version = chip_id(adap);
2603 * Select the right set of register ranges to dump depending on the
2604 * adapter chip type.
2606 switch (chip_version) {
2608 if (adap->flags & IS_VF) {
2609 reg_ranges = t4vf_reg_ranges;
2610 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2612 reg_ranges = t4_reg_ranges;
2613 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2618 if (adap->flags & IS_VF) {
2619 reg_ranges = t5vf_reg_ranges;
2620 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2622 reg_ranges = t5_reg_ranges;
2623 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2628 if (adap->flags & IS_VF) {
2629 reg_ranges = t6vf_reg_ranges;
2630 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2632 reg_ranges = t6_reg_ranges;
2633 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2639 "Unsupported chip version %d\n", chip_version);
2644 * Clear the register buffer and insert the appropriate register
2645 * values selected by the above register ranges.
2647 memset(buf, 0, buf_size);
2648 for (range = 0; range < reg_ranges_size; range += 2) {
2649 unsigned int reg = reg_ranges[range];
2650 unsigned int last_reg = reg_ranges[range + 1];
2651 u32 *bufp = (u32 *)(buf + reg);
2654 * Iterate across the register range filling in the register
2655 * buffer but don't write past the end of the register buffer.
2657 while (reg <= last_reg && bufp < buf_end) {
2658 *bufp++ = t4_read_reg(adap, reg);
2665 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID
2666 * header followed by one or more VPD-R sections, each with its own header.
2674 struct t4_vpdr_hdr {
2680 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2682 #define EEPROM_DELAY 10 /* 10us per poll spin */
2683 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
2685 #define EEPROM_STAT_ADDR 0x7bfc
2686 #define VPD_SIZE 0x800
2687 #define VPD_BASE 0x400
2688 #define VPD_BASE_OLD 0
2689 #define VPD_LEN 1024
2690 #define VPD_INFO_FLD_HDR_SIZE 3
2691 #define CHELSIO_VPD_UNIQUE_ID 0x82
2694 * Small utility function to wait till any outstanding VPD Access is complete.
2695 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2696 * VPD Access in flight. This allows us to handle the problem of having a
2697 * previous VPD Access time out and prevent an attempt to inject a new VPD
2698 * Request before any in-flight VPD reguest has completed.
2700 static int t4_seeprom_wait(struct adapter *adapter)
2702 unsigned int base = adapter->params.pci.vpd_cap_addr;
2706 * If no VPD Access is in flight, we can just return success right
2709 if (!adapter->vpd_busy)
2713 * Poll the VPD Capability Address/Flag register waiting for it
2714 * to indicate that the operation is complete.
2716 max_poll = EEPROM_MAX_POLL;
2720 udelay(EEPROM_DELAY);
2721 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2724 * If the operation is complete, mark the VPD as no longer
2725 * busy and return success.
2727 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2728 adapter->vpd_busy = 0;
2731 } while (--max_poll);
2734 * Failure! Note that we leave the VPD Busy status set in order to
2735 * avoid pushing a new VPD Access request into the VPD Capability till
2736 * the current operation eventually succeeds. It's a bug to issue a
2737 * new request when an existing request is in flight and will result
2738 * in corrupt hardware state.
2744 * t4_seeprom_read - read a serial EEPROM location
2745 * @adapter: adapter to read
2746 * @addr: EEPROM virtual address
2747 * @data: where to store the read data
2749 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2750 * VPD capability. Note that this function must be called with a virtual
2753 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2755 unsigned int base = adapter->params.pci.vpd_cap_addr;
2759 * VPD Accesses must alway be 4-byte aligned!
2761 if (addr >= EEPROMVSIZE || (addr & 3))
2765 * Wait for any previous operation which may still be in flight to
2768 ret = t4_seeprom_wait(adapter);
2770 CH_ERR(adapter, "VPD still busy from previous operation\n");
2775 * Issue our new VPD Read request, mark the VPD as being busy and wait
2776 * for our request to complete. If it doesn't complete, note the
2777 * error and return it to our caller. Note that we do not reset the
2780 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2781 adapter->vpd_busy = 1;
2782 adapter->vpd_flag = PCI_VPD_ADDR_F;
2783 ret = t4_seeprom_wait(adapter);
2785 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2790 * Grab the returned data, swizzle it into our endianness and
2793 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2794 *data = le32_to_cpu(*data);
2799 * t4_seeprom_write - write a serial EEPROM location
2800 * @adapter: adapter to write
2801 * @addr: virtual EEPROM address
2802 * @data: value to write
2804 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2805 * VPD capability. Note that this function must be called with a virtual
2808 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2810 unsigned int base = adapter->params.pci.vpd_cap_addr;
2816 * VPD Accesses must alway be 4-byte aligned!
2818 if (addr >= EEPROMVSIZE || (addr & 3))
2822 * Wait for any previous operation which may still be in flight to
2825 ret = t4_seeprom_wait(adapter);
2827 CH_ERR(adapter, "VPD still busy from previous operation\n");
2832 * Issue our new VPD Read request, mark the VPD as being busy and wait
2833 * for our request to complete. If it doesn't complete, note the
2834 * error and return it to our caller. Note that we do not reset the
2837 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2839 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2840 (u16)addr | PCI_VPD_ADDR_F);
2841 adapter->vpd_busy = 1;
2842 adapter->vpd_flag = 0;
2843 ret = t4_seeprom_wait(adapter);
2845 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2850 * Reset PCI_VPD_DATA register after a transaction and wait for our
2851 * request to complete. If it doesn't complete, return error.
2853 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2854 max_poll = EEPROM_MAX_POLL;
2856 udelay(EEPROM_DELAY);
2857 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2858 } while ((stats_reg & 0x1) && --max_poll);
2862 /* Return success! */
2867 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2868 * @phys_addr: the physical EEPROM address
2869 * @fn: the PCI function number
2870 * @sz: size of function-specific area
2872 * Translate a physical EEPROM address to virtual. The first 1K is
2873 * accessed through virtual addresses starting at 31K, the rest is
2874 * accessed through virtual addresses starting at 0.
2876 * The mapping is as follows:
2877 * [0..1K) -> [31K..32K)
2878 * [1K..1K+A) -> [ES-A..ES)
2879 * [1K+A..ES) -> [0..ES-A-1K)
2881 * where A = @fn * @sz, and ES = EEPROM size.
2883 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2886 if (phys_addr < 1024)
2887 return phys_addr + (31 << 10);
2888 if (phys_addr < 1024 + fn)
2889 return EEPROMSIZE - fn + phys_addr - 1024;
2890 if (phys_addr < EEPROMSIZE)
2891 return phys_addr - 1024 - fn;
2896 * t4_seeprom_wp - enable/disable EEPROM write protection
2897 * @adapter: the adapter
2898 * @enable: whether to enable or disable write protection
2900 * Enables or disables write protection on the serial EEPROM.
2902 int t4_seeprom_wp(struct adapter *adapter, int enable)
2904 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2908 * get_vpd_keyword_val - Locates an information field keyword in the VPD
2909 * @vpd: Pointer to buffered vpd data structure
2910 * @kw: The keyword to search for
2911 * @region: VPD region to search (starting from 0)
2913 * Returns the value of the information field keyword or
2914 * -ENOENT otherwise.
2916 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
2919 unsigned int offset, len;
2920 const struct t4_vpdr_hdr *vpdr;
/* Start at the first VPD-R resource header, after the fixed VPD id header. */
2922 offset = sizeof(struct t4_vpd_hdr);
2923 vpdr = (const void *)(vpd + offset);
2924 tag = vpdr->vpdr_tag;
/* Resource length is 16 bits, little-endian, split across two bytes. */
2925 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
/* Step forward one resource (header + payload) per region. */
2927 offset += sizeof(struct t4_vpdr_hdr) + len;
2928 vpdr = (const void *)(vpd + offset);
/* Consecutive regions are expected to carry consecutive tag values. */
2929 if (++tag != vpdr->vpdr_tag)
2931 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2933 offset += sizeof(struct t4_vpdr_hdr);
/* Never scan past the buffered VPD image. */
2935 if (offset + len > VPD_LEN) {
/* Walk keyword/length/value triples inside the selected region. */
2939 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
/* Keywords are two characters; on match, skip the header to the value. */
2940 if (memcmp(vpd + i , kw , 2) == 0){
2941 i += VPD_INFO_FLD_HDR_SIZE;
/* vpd[i+2] is the field length byte; advance past header plus payload. */
2945 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
2953 * get_vpd_params - read VPD parameters from VPD EEPROM
2954 * @adapter: adapter to read
2955 * @p: where to store the parameters
2956 * @vpd: caller provided temporary space to read the VPD into
2958 * Reads card parameters stored in VPD EEPROM.
2960 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2961 uint16_t device_id, u32 *buf)
2964 int ec, sn, pn, na, md;
2966 const u8 *vpd = (const u8 *)buf;
2969 * Card information normally starts at VPD_BASE but early cards had
2972 ret = t4_seeprom_read(adapter, VPD_BASE, buf);
2977 * The VPD shall have a unique identifier specified by the PCI SIG.
2978 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2979 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2980 * is expected to automatically put this entry at the
2981 * beginning of the VPD.
/* Choose new vs. legacy VPD base from the identifier byte just read. */
2983 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
/* Pull in the whole VPD image, 32 bits per EEPROM read. */
2985 for (i = 0; i < VPD_LEN; i += 4) {
2986 ret = t4_seeprom_read(adapter, addr + i, buf++);
/* Look a keyword up in region 0 and fail loudly if it is missing. */
2991 #define FIND_VPD_KW(var,name) do { \
2992 var = get_vpd_keyword_val(vpd, name, 0); \
2994 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* "RV" carries the checksum byte; the byte sum over the VPD must be 0. */
2999 FIND_VPD_KW(i, "RV");
3000 for (csum = 0; i >= 0; i--)
3005 "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Locate the standard identification keywords. */
3009 FIND_VPD_KW(ec, "EC");
3010 FIND_VPD_KW(sn, "SN");
3011 FIND_VPD_KW(pn, "PN");
3012 FIND_VPD_KW(na, "NA");
3015 memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
3017 memcpy(p->ec, vpd + ec, EC_LEN);
/* Each field's length byte sits 2 bytes into its info-field header. */
3019 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3020 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3022 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3023 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3024 strstrip((char *)p->pn);
3025 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3026 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3027 strstrip((char *)p->na);
/* Custom cards (device-id bit 7 set) skip the optional "VF" lookup. */
3029 if (device_id & 0x80)
3030 return 0; /* Custom card */
/* "VF" is looked up in region 1; fall back to "unknown" when absent. */
3032 md = get_vpd_keyword_val(vpd, "VF", 1);
3034 snprintf(p->md, sizeof(p->md), "unknown");
3036 i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
3037 memcpy(p->md, vpd + md, min(i, MD_LEN));
3038 strstrip((char *)p->md);
3044 /* serial flash and firmware constants and flash config file constants */
/* NOTE(review): the enum wrapper lines are not visible in this excerpt. */
3046 SF_ATTEMPTS = 10, /* max retries for SF operations */
/* Opcodes below are issued in the low byte of the A_SF_OP command word. */
3048 /* flash command opcodes */
3049 SF_PROG_PAGE = 2, /* program 256B page */
3050 SF_WR_DISABLE = 4, /* disable writes */
3051 SF_RD_STATUS = 5, /* read status register */
3052 SF_WR_ENABLE = 6, /* enable writes */
3053 SF_RD_DATA_FAST = 0xb, /* read flash */
3054 SF_RD_ID = 0x9f, /* read ID */
3055 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */
3059 * sf1_read - read data from the serial flash
3060 * @adapter: the adapter
3061 * @byte_cnt: number of bytes to read
3062 * @cont: whether another operation will be chained
3063 * @lock: whether to lock SF for PL access only
3064 * @valp: where to store the read data
3066 * Reads up to 4 bytes of data from the serial flash. The location of
3067 * the read needs to be specified prior to calling this by issuing the
3068 * appropriate commands to the serial flash.
3070 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3071 int lock, u32 *valp)
/* The SF interface moves at most 4 bytes per operation. */
3075 if (!byte_cnt || byte_cnt > 4)
/* Don't start a new op while a previous one is still in flight. */
3077 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Kick off the read; BYTECNT is encoded as count-1. */
3079 t4_write_reg(adapter, A_SF_OP,
3080 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
/* Poll BUSY until clear: up to SF_ATTEMPTS tries, 5 us apart. */
3081 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
/* On success, latch the data the flash returned. */
3083 *valp = t4_read_reg(adapter, A_SF_DATA);
3088 * sf1_write - write data to the serial flash
3089 * @adapter: the adapter
3090 * @byte_cnt: number of bytes to write
3091 * @cont: whether another operation will be chained
3092 * @lock: whether to lock SF for PL access only
3093 * @val: value to write
3095 * Writes up to 4 bytes of data to the serial flash. The location of
3096 * the write needs to be specified prior to calling this by issuing the
3097 * appropriate commands to the serial flash.
3099 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* The SF interface moves at most 4 bytes per operation. */
3102 if (!byte_cnt || byte_cnt > 4)
/* Don't start a new op while a previous one is still in flight. */
3104 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data, then start the op; V_OP(1) selects a write. */
3106 t4_write_reg(adapter, A_SF_DATA, val);
3107 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3108 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
/* Poll BUSY until clear: up to SF_ATTEMPTS tries, 5 us apart. */
3109 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3113 * flash_wait_op - wait for a flash operation to complete
3114 * @adapter: the adapter
3115 * @attempts: max number of polls of the status register
3116 * @delay: delay between polls in ms
3118 * Wait for a flash operation to complete by polling the status register.
3120 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue the read-status command, then read one status byte back. */
3126 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3127 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
/* Give up once the attempt budget is exhausted. */
3131 if (--attempts == 0)
3139 * t4_read_flash - read words from serial flash
3140 * @adapter: the adapter
3141 * @addr: the start address for the read
3142 * @nwords: how many 32-bit words to read
3143 * @data: where to store the read data
3144 * @byte_oriented: whether to store data as bytes or as words
3146 * Read the specified number of 32-bit words from the serial flash.
3147 * If @byte_oriented is set the read data is stored as a byte array
3148 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3149 * natural endianness.
3151 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3152 unsigned int nwords, u32 *data, int byte_oriented)
/* Reject reads past the flash or starting on a non-word boundary. */
3156 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Byte-swap the address and merge the fast-read opcode into the low byte. */
3159 addr = swab32(addr) | SF_RD_DATA_FAST;
/* Send the 4-byte command/address, then read one throwaway byte
 * (presumably the fast-read latency byte — the word is overwritten below). */
3161 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3162 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3165 for ( ; nwords; nwords--, data++) {
/* Stay chained until the final word; the last read also takes the lock arg. */
3166 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3168 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Convert to a big-endian byte stream when the caller asked for bytes. */
3172 *data = (__force __u32)(cpu_to_be32(*data));
3178 * t4_write_flash - write up to a page of data to the serial flash
3179 * @adapter: the adapter
3180 * @addr: the start address to write
3181 * @n: length of data to write in bytes
3182 * @data: the data to write
3183 * @byte_oriented: whether to store data as bytes or as words
3185 * Writes up to a page of data (256 bytes) to the serial flash starting
3186 * at the given address. All the data must be written to the same page.
3187 * If @byte_oriented is set the write data is stored as byte stream
3188 * (i.e. matches what on disk), otherwise in big-endian.
3190 int t4_write_flash(struct adapter *adapter, unsigned int addr,
3191 unsigned int n, const u8 *data, int byte_oriented)
3194 u32 buf[SF_PAGE_SIZE / 4];
3195 unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must land inside the flash and within a single 256B page. */
3197 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* Build the page-program command: swapped address + opcode in low byte. */
3200 val = swab32(addr) | SF_PROG_PAGE;
/* Write-enable the part, then issue the program command. */
3202 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3203 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Push the payload up to 4 bytes at a time, packed high byte first. */
3206 for (left = n; left; left -= c) {
3208 for (val = 0, i = 0; i < c; ++i)
3209 val = (val << 8) + *data++;
3212 val = cpu_to_be32(val);
/* Stay chained (cont) until the final chunk of the page. */
3214 ret = sf1_write(adapter, c, c != left, 1, val);
/* Wait for the program cycle to finish. */
3218 ret = flash_wait_op(adapter, 8, 1);
3222 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3224 /* Read the page to verify the write succeeded */
3225 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* data was advanced while packing; data - n is the original buffer. */
3230 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3232 "failed to correctly write the flash page at %#x\n",
/* Error path: release the SF lock before returning. */
3239 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3244 * t4_get_fw_version - read the firmware version
3245 * @adapter: the adapter
3246 * @vers: where to place the version
3248 * Reads the FW version from flash.
3250 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* The version word lives in the fw_hdr at the base of the FW flash region. */
3252 return t4_read_flash(adapter, FLASH_FW_START +
3253 offsetof(struct fw_hdr, fw_ver), 1,
3258 * t4_get_bs_version - read the firmware bootstrap version
3259 * @adapter: the adapter
3260 * @vers: where to place the version
3262 * Reads the FW Bootstrap version from flash.
3264 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
/* Same fw_hdr layout as regular FW, but in the bootstrap flash region. */
3266 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3267 offsetof(struct fw_hdr, fw_ver), 1,
3272 * t4_get_tp_version - read the TP microcode version
3273 * @adapter: the adapter
3274 * @vers: where to place the version
3276 * Reads the TP microcode version from flash.
3278 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* The TP microcode version is another field of the FW region's fw_hdr. */
3280 return t4_read_flash(adapter, FLASH_FW_START +
3281 offsetof(struct fw_hdr, tp_microcode_ver),
3286 * t4_get_exprom_version - return the Expansion ROM version (if any)
3287 * @adapter: the adapter
3288 * @vers: where to place the version
3290 * Reads the Expansion ROM header from FLASH and returns the version
3291 * number (if present) through the @vers return value pointer. We return
3292 * this in the Firmware Version Format since it's convenient. Return
3293 * 0 on success, -ENOENT if no Expansion ROM is present.
3295 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
/* Minimal view of the option-ROM header: signature area plus version. */
3297 struct exprom_header {
3298 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3299 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized buffer big enough to hold the header read below. */
3301 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3305 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3306 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
/* No (valid) expansion ROM if the 0x55 0xaa signature is absent. */
3311 hdr = (struct exprom_header *)exprom_header_buf;
3312 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* Repackage the four version bytes in Firmware Version format. */
3315 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3316 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3317 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3318 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3323 * t4_get_scfg_version - return the Serial Configuration version
3324 * @adapter: the adapter
3325 * @vers: where to place the version
3327 * Reads the Serial Configuration Version via the Firmware interface
3328 * (thus this can only be called once we're ready to issue Firmware
3329 * commands). The format of the Serial Configuration version is
3330 * adapter specific. Returns 0 on success, an error on failure.
3332 * Note that early versions of the Firmware didn't include the ability
3333 * to retrieve the Serial Configuration version, so we zero-out the
3334 * return-value parameter in that case to avoid leaving it with
3337 * Also note that the Firmware will return its cached copy of the Serial
3338 * Initialization Revision ID, not the actual Revision ID as written in
3339 * the Serial EEPROM. This is only an issue if a new VPD has been written
3340 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3341 * it's best to defer calling this routine till after a FW_RESET_CMD has
3342 * been issued if the Host Driver will be performing a full adapter
3345 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
/* Ask the firmware for DEV_SCFGREV via the params query interface. */
3350 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3351 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3352 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3353 1, &scfgrev_param, vers);
3360 * t4_get_vpd_version - return the VPD version
3361 * @adapter: the adapter
3362 * @vers: where to place the version
3364 * Reads the VPD via the Firmware interface (thus this can only be called
3365 * once we're ready to issue Firmware commands). The format of the
3366 * VPD version is adapter specific. Returns 0 on success, an error on
3369 * Note that early versions of the Firmware didn't include the ability
3370 * to retrieve the VPD version, so we zero-out the return-value parameter
3371 * in that case to avoid leaving it with garbage in it.
3373 * Also note that the Firmware will return its cached copy of the VPD
3374 * Revision ID, not the actual Revision ID as written in the Serial
3375 * EEPROM. This is only an issue if a new VPD has been written and the
3376 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3377 * to defer calling this routine till after a FW_RESET_CMD has been issued
3378 * if the Host Driver will be performing a full adapter initialization.
3380 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
/* Ask the firmware for DEV_VPDREV via the params query interface. */
3385 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3386 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3387 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3388 1, &vpdrev_param, vers);
3395 * t4_get_version_info - extract various chip/firmware version information
3396 * @adapter: the adapter
3398 * Reads various chip/firmware version numbers and stores them into the
3399 * adapter Adapter Parameters structure. If any of the efforts fails
3400 * the first failure will be returned, but all of the version numbers
3403 int t4_get_version_info(struct adapter *adapter)
/* Remember only the FIRST failure; every version is still attempted. */
3407 #define FIRST_RET(__getvinfo) \
3409 int __ret = __getvinfo; \
3410 if (__ret && !ret) \
3414 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3415 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3416 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3417 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3418 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3419 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3427 * t4_flash_erase_sectors - erase a range of flash sectors
3428 * @adapter: the adapter
3429 * @start: the first sector to erase
3430 * @end: the last sector to erase
3432 * Erases the sectors in the given inclusive range.
3434 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
/* The range must stay within the part's sector count. */
3438 if (end >= adapter->params.sf_nsec)
3441 while (start <= end) {
/* Write-enable, then erase: opcode in the low byte, sector in bits 8+,
 * and wait up to 14 polls at 500 ms for the erase cycle to complete. */
3442 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3443 (ret = sf1_write(adapter, 4, 0, 1,
3444 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3445 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3447 "erase of flash sector %d failed, error %d\n",
3453 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3458 * t4_flash_cfg_addr - return the address of the flash configuration file
3459 * @adapter: the adapter
3461 * Return the address within the flash where the Firmware Configuration
3462 * File is stored, or an error if the device FLASH is too small to contain
3463 * a Firmware Configuration File.
3465 int t4_flash_cfg_addr(struct adapter *adapter)
3468 * If the device FLASH isn't large enough to hold a Firmware
3469 * Configuration File, return an error.
3471 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3474 return FLASH_CFG_START;
3478 * Return TRUE if the specified firmware matches the adapter. I.e. T4
3479 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3480 * and emit an error message for mismatched firmware to save our caller the
3483 static int t4_fw_matches_chip(struct adapter *adap,
3484 const struct fw_hdr *hdr)
3487 * The expression below will return FALSE for any unsupported adapter
3488 * which will keep us "honest" in the future ...
3490 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3491 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3492 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3496 "FW image (%d) is not suitable for this adapter (%d)\n",
3497 hdr->chip, chip_id(adap));
3502 * t4_load_fw - download firmware
3503 * @adap: the adapter
3504 * @fw_data: the firmware image to write
3507 * Write the supplied firmware image to the card's serial flash.
3509 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3514 u8 first_page[SF_PAGE_SIZE];
3515 const u32 *p = (const u32 *)fw_data;
3516 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3517 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3518 unsigned int fw_start_sec;
3519 unsigned int fw_start;
3520 unsigned int fw_size;
/* Bootstrap images go to their own flash region; regular FW elsewhere. */
3522 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3523 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3524 fw_start = FLASH_FWBOOTSTRAP_START;
3525 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3527 fw_start_sec = FLASH_FW_START_SEC;
3528 fw_start = FLASH_FW_START;
3529 fw_size = FLASH_FW_MAX_SIZE;
/* Sanity checks: non-empty, 512B multiple, header agrees, fits region. */
3533 CH_ERR(adap, "FW image has no data\n");
3538 "FW image size not multiple of 512 bytes\n");
3541 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3543 "FW image size differs from size in FW header\n");
3546 if (size > fw_size) {
3547 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3551 if (!t4_fw_matches_chip(adap, hdr))
/* The sum of all 32-bit words of a valid image must equal 0xffffffff. */
3554 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3555 csum += be32_to_cpu(p[i]);
3557 if (csum != 0xffffffff) {
3559 "corrupted firmware image, checksum %#x\n", csum);
/* Erase exactly the sectors the new image will span. */
3563 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3564 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3569 * We write the correct version at the end so the driver can see a bad
3570 * version if the FW write fails. Start by writing a copy of the
3571 * first page with a bad version.
3573 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3574 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3575 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
/* Stream the remaining pages of the image. */
3580 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3581 addr += SF_PAGE_SIZE;
3582 fw_data += SF_PAGE_SIZE;
3583 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
/* Finally patch in the real version word to mark the image valid. */
3588 ret = t4_write_flash(adap,
3589 fw_start + offsetof(struct fw_hdr, fw_ver),
3590 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3593 CH_ERR(adap, "firmware download failed, error %d\n",
3599 * t4_fwcache - firmware cache operation
3600 * @adap: the adapter
3601 * @op : the operation (flush or flush and invalidate)
3603 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3605 struct fw_params_cmd c;
3607 memset(&c, 0, sizeof(c));
/* Build a PARAMS write command addressed to this PF, VF 0. */
3609 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3610 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3611 V_FW_PARAMS_CMD_PFN(adap->pf) |
3612 V_FW_PARAMS_CMD_VFN(0));
3613 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Parameter DEV_FWCACHE carries the requested operation as its value. */
3615 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3616 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3617 c.param[0].val = (__force __be32)op;
3619 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/* Dump the CIM PIF request/response capture (LA) buffers and, optionally,
 * report their current write pointers. */
3622 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3623 unsigned int *pif_req_wrptr,
3624 unsigned int *pif_rsp_wrptr)
3627 u32 cfg, val, req, rsp;
/* Pause LA capture (clear F_LADBGEN) while we read the arrays. */
3629 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3630 if (cfg & F_LADBGEN)
3631 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
/* Snapshot the current request/response write pointers. */
3633 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3634 req = G_POLADBGWRPTR(val);
3635 rsp = G_PILADBGWRPTR(val);
3637 *pif_req_wrptr = req;
3639 *pif_rsp_wrptr = rsp;
3641 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3642 for (j = 0; j < 6; j++) {
/* Select read pointers, then pull one request and one response word. */
3643 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3644 V_PILADBGRDPTR(rsp));
3645 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3646 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
/* Advance both read pointers, wrapping at their field masks. */
3650 req = (req + 2) & M_POLADBGRDPTR;
3651 rsp = (rsp + 2) & M_PILADBGRDPTR;
/* Restore the original debug config (re-enables capture if it was on). */
3653 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/* Dump the CIM MA capture (LA) request/response buffers. */
3656 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
/* Pause LA capture (clear F_LADBGEN) while we read the arrays. */
3661 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3662 if (cfg & F_LADBGEN)
3663 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3665 for (i = 0; i < CIM_MALA_SIZE; i++) {
3666 for (j = 0; j < 5; j++) {
/* Select the entry, then read one request and one response word. */
3668 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3669 V_PILADBGRDPTR(idx));
3670 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3671 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
/* Restore the original debug config (re-enables capture if it was on). */
3674 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3677 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3681 for (i = 0; i < 8; i++) {
3682 u32 *p = la_buf + i;
3684 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3685 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3686 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3687 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3688 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3693 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3694 * @caps16: a 16-bit Port Capabilities value
3696 * Returns the equivalent 32-bit Port Capabilities value.
3698 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
3700 uint32_t caps32 = 0;
3702 #define CAP16_TO_CAP32(__cap) \
3704 if (caps16 & FW_PORT_CAP_##__cap) \
3705 caps32 |= FW_PORT_CAP32_##__cap; \
3708 CAP16_TO_CAP32(SPEED_100M);
3709 CAP16_TO_CAP32(SPEED_1G);
3710 CAP16_TO_CAP32(SPEED_25G);
3711 CAP16_TO_CAP32(SPEED_10G);
3712 CAP16_TO_CAP32(SPEED_40G);
3713 CAP16_TO_CAP32(SPEED_100G);
3714 CAP16_TO_CAP32(FC_RX);
3715 CAP16_TO_CAP32(FC_TX);
3716 CAP16_TO_CAP32(ANEG);
3717 CAP16_TO_CAP32(FORCE_PAUSE);
3718 CAP16_TO_CAP32(MDIAUTO);
3719 CAP16_TO_CAP32(MDISTRAIGHT);
3720 CAP16_TO_CAP32(FEC_RS);
3721 CAP16_TO_CAP32(FEC_BASER_RS);
3722 CAP16_TO_CAP32(802_3_PAUSE);
3723 CAP16_TO_CAP32(802_3_ASM_DIR);
3725 #undef CAP16_TO_CAP32
3731 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3732 * @caps32: a 32-bit Port Capabilities value
3734 * Returns the equivalent 16-bit Port Capabilities value. Note that
3735 * not all 32-bit Port Capabilities can be represented in the 16-bit
3736 * Port Capabilities and some fields/values may not make it.
3738 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
3740 uint16_t caps16 = 0;
3742 #define CAP32_TO_CAP16(__cap) \
3744 if (caps32 & FW_PORT_CAP32_##__cap) \
3745 caps16 |= FW_PORT_CAP_##__cap; \
3748 CAP32_TO_CAP16(SPEED_100M);
3749 CAP32_TO_CAP16(SPEED_1G);
3750 CAP32_TO_CAP16(SPEED_10G);
3751 CAP32_TO_CAP16(SPEED_25G);
3752 CAP32_TO_CAP16(SPEED_40G);
3753 CAP32_TO_CAP16(SPEED_100G);
3754 CAP32_TO_CAP16(FC_RX);
3755 CAP32_TO_CAP16(FC_TX);
3756 CAP32_TO_CAP16(802_3_PAUSE);
3757 CAP32_TO_CAP16(802_3_ASM_DIR);
3758 CAP32_TO_CAP16(ANEG);
3759 CAP32_TO_CAP16(FORCE_PAUSE);
3760 CAP32_TO_CAP16(MDIAUTO);
3761 CAP32_TO_CAP16(MDISTRAIGHT);
3762 CAP32_TO_CAP16(FEC_RS);
3763 CAP32_TO_CAP16(FEC_BASER_RS);
3765 #undef CAP32_TO_CAP16
3771 is_bt(struct port_info *pi)
3774 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
3775 pi->port_type == FW_PORT_TYPE_BT_XFI ||
3776 pi->port_type == FW_PORT_TYPE_BT_XAUI);
3780 * t4_link_l1cfg - apply link configuration to MAC/PHY
3781 * @phy: the PHY to setup
3782 * @mac: the MAC to setup
3783 * @lc: the requested link configuration
3785 * Set up a port's MAC and PHY according to a desired link configuration.
3786 * - If the PHY can auto-negotiate first decide what to advertise, then
3787 * enable/disable auto-negotiation as desired, and reset.
3788 * - If the PHY does not auto-negotiate just reset it.
3789 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3790 * otherwise do it later based on the outcome of auto-negotiation.
3792 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3793 struct link_config *lc)
3795 struct fw_port_cmd c;
3796 unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
3797 unsigned int aneg, fc, fec, speed, rcap;
/* Translate the requested pause settings into 32-bit port caps. */
3800 if (lc->requested_fc & PAUSE_RX)
3801 fc |= FW_PORT_CAP32_FC_RX;
3802 if (lc->requested_fc & PAUSE_TX)
3803 fc |= FW_PORT_CAP32_FC_TX;
/* Without pause autoneg, force the chosen pause settings on. */
3804 if (!(lc->requested_fc & PAUSE_AUTONEG))
3805 fc |= FW_PORT_CAP32_FORCE_PAUSE;
/* NOTE(review): the FEC_AUTO branch body is elided in this excerpt;
 * explicit FEC requests map directly to capability bits below. */
3808 if (lc->requested_fec == FEC_AUTO)
3811 if (lc->requested_fec & FEC_RS)
3812 fec |= FW_PORT_CAP32_FEC_RS;
3813 if (lc->requested_fec & FEC_BASER_RS)
3814 fec |= FW_PORT_CAP32_FEC_BASER_RS;
/* Autoneg: honor an explicit enable/disable, else follow port support. */
3817 if (lc->requested_aneg == AUTONEG_DISABLE)
3819 else if (lc->requested_aneg == AUTONEG_ENABLE)
3820 aneg = FW_PORT_CAP32_ANEG;
3822 aneg = lc->supported & FW_PORT_CAP32_ANEG;
/* Speed: advertise all supported speeds with AN, else the requested
 * speed, else the fastest supported speed. */
3825 speed = lc->supported & V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
3826 } else if (lc->requested_speed != 0)
3827 speed = speed_to_fwcap(lc->requested_speed);
3829 speed = fwcap_top_speed(lc->supported);
3831 /* Force AN on for BT cards. */
3832 if (is_bt(adap->port[port]))
3833 aneg = lc->supported & FW_PORT_CAP32_ANEG;
/* Clamp the requested caps to what the port actually supports. */
3835 rcap = aneg | speed | fc | fec;
3836 if ((rcap | lc->supported) != lc->supported) {
3838 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3841 rcap &= lc->supported;
/* Issue the L1 configure, using 32-bit caps when the FW supports them. */
3845 memset(&c, 0, sizeof(c));
3846 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3847 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3848 V_FW_PORT_CMD_PORTID(port));
3849 if (adap->params.port_caps32) {
3851 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
3853 c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
/* Older firmwares: fall back to the 16-bit capability format. */
3856 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3858 c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
/* NOTE(review): _ns mailbox variant — presumably non-sleeping; confirm. */
3861 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3865 * t4_restart_aneg - restart autonegotiation
3866 * @adap: the adapter
3867 * @mbox: mbox to use for the FW command
3868 * @port: the port id
3870 * Restarts autonegotiation for the selected port.
3872 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3874 struct fw_port_cmd c;
/* Build an L1 configure command advertising only the ANEG capability. */
3876 memset(&c, 0, sizeof(c));
3877 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3878 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3879 V_FW_PORT_CMD_PORTID(port));
3881 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3883 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3884 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Signature of a module-specific interrupt handler callback. */
3887 typedef void (*int_handler_t)(struct adapter *adap);
/* One row of a table-driven interrupt decode; see t4_handle_intr_status. */
3890 unsigned int mask; /* bits to check in interrupt status */
3891 const char *msg; /* message to print or NULL */
3892 short stat_idx; /* stat counter to increment or -1 */
3893 unsigned short fatal; /* whether the condition reported is fatal */
3894 int_handler_t int_handler; /* platform-specific int handler */
3898 * t4_handle_intr_status - table driven interrupt handler
3899 * @adapter: the adapter that generated the interrupt
3900 * @reg: the interrupt status register to process
3901 * @acts: table of interrupt actions
3903 * A table driven interrupt handler that applies a set of masks to an
3904 * interrupt status word and performs the corresponding actions if the
3905 * interrupts described by the mask have occurred. The actions include
3906 * optionally emitting a warning or alert message. The table is terminated
3907 * by an entry specifying mask 0. Returns the number of fatal interrupt
3910 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3911 const struct intr_info *acts)
3914 unsigned int mask = 0;
3915 unsigned int status = t4_read_reg(adapter, reg);
3917 for ( ; acts->mask; ++acts) {
/* Skip table entries whose bits are not set in the status word. */
3918 if (!(status & acts->mask))
/* Fatal conditions are alerts; non-fatal ones are rate-limited warnings. */
3922 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3923 status & acts->mask);
3924 } else if (acts->msg)
3925 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3926 status & acts->mask);
/* Invoke the entry's module-specific handler, if any. */
3927 if (acts->int_handler)
3928 acts->int_handler(adapter);
3932 if (status) /* clear processed interrupts */
3933 t4_write_reg(adapter, reg, status);
3938 * Interrupt handler for the PCIE module.
3940 static void pcie_intr_handler(struct adapter *adapter)
/* T4: decode table for the UTL system-bus agent status register. */
3942 static const struct intr_info sysbus_intr_info[] = {
3943 { F_RNPP, "RXNP array parity error", -1, 1 },
3944 { F_RPCP, "RXPC array parity error", -1, 1 },
3945 { F_RCIP, "RXCIF array parity error", -1, 1 },
3946 { F_RCCP, "Rx completions control array parity error", -1, 1 },
3947 { F_RFTP, "RXFT array parity error", -1, 1 },
/* T4: decode table for the UTL PCI Express port status register. */
3950 static const struct intr_info pcie_port_intr_info[] = {
3951 { F_TPCP, "TXPC array parity error", -1, 1 },
3952 { F_TNPP, "TXNP array parity error", -1, 1 },
3953 { F_TFTP, "TXFT array parity error", -1, 1 },
3954 { F_TCAP, "TXCA array parity error", -1, 1 },
3955 { F_TCIP, "TXCIF array parity error", -1, 1 },
3956 { F_RCAP, "RXCA array parity error", -1, 1 },
3957 { F_OTDD, "outbound request TLP discarded", -1, 1 },
3958 { F_RDPE, "Rx data parity error", -1, 1 },
3959 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
/* T4: decode table for A_PCIE_INT_CAUSE. */
3962 static const struct intr_info pcie_intr_info[] = {
3963 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3964 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3965 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3966 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3967 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3968 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3969 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3970 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3971 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3972 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3973 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3974 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3975 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3976 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3977 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3978 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3979 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3980 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3981 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3982 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3983 { F_FIDPERR, "PCI FID parity error", -1, 1 },
3984 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3985 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3986 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3987 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3988 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3989 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3990 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
3991 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
3992 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
/* T5/T6: decode table for A_PCIE_INT_CAUSE on later chips. */
3997 static const struct intr_info t5_pcie_intr_info[] = {
3998 { F_MSTGRPPERR, "Master Response Read Queue parity error",
4000 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
4001 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
4002 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4003 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4004 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4005 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4006 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
4008 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
4010 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4011 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
4012 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4013 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4014 { F_DREQWRPERR, "PCI DMA channel write request parity error",
4016 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4017 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4018 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
4019 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4020 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4021 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4022 { F_FIDPERR, "PCI FID parity error", -1, 1 },
4023 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
4024 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
4025 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4026 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
4028 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
4030 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
4031 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
4032 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4033 { F_READRSPERR, "Outbound read error", -1,
/* T4 also decodes the two UTL status registers; later chips decode
 * only A_PCIE_INT_CAUSE.  NOTE(review): the chip-select conditional
 * itself is elided in this excerpt. */
4041 fat = t4_handle_intr_status(adapter,
4042 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4044 t4_handle_intr_status(adapter,
4045 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4046 pcie_port_intr_info) +
4047 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4050 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
/* Escalate if any fatal condition was decoded. */
4053 t4_fatal_err(adapter);
4057 * TP interrupt handler.
/*
 * TP interrupt handler: decodes A_TP_INT_CAUSE against the local table and
 * escalates to t4_fatal_err() if any listed cause fired.
 * NOTE(review): this listing is non-contiguous; braces/terminator lines
 * between the numbered lines are elided.
 */
4059 static void tp_intr_handler(struct adapter *adapter)
4061 static const struct intr_info tp_intr_info[] = {
4062 { 0x3fffffff, "TP parity error", -1, 1 },
4063 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4067 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4068 t4_fatal_err(adapter);
4072 * SGE interrupt handler.
/*
 * SGE interrupt handler.  Reads the 64-bit parity-error cause from
 * A_SGE_INT_CAUSE1/2, decodes A_SGE_INT_CAUSE3 against the common table plus
 * a chip-specific table (T4/T5 vs. T6), reports per-queue errors recorded in
 * A_SGE_ERROR_STATS, and escalates via t4_fatal_err() when anything fatal
 * was seen.  NOTE(review): listing is non-contiguous; several lines
 * (declarations, braces, the final fatal-check condition) are elided.
 */
4074 static void sge_intr_handler(struct adapter *adapter)
4079 static const struct intr_info sge_intr_info[] = {
4080 { F_ERR_CPL_EXCEED_IQE_SIZE,
4081 "SGE received CPL exceeding IQE size", -1, 1 },
4082 { F_ERR_INVALID_CIDX_INC,
4083 "SGE GTS CIDX increment too large", -1, 0 },
4084 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
4085 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
4086 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4087 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4088 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
4090 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
4092 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
4094 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
4096 { F_ERR_ING_CTXT_PRIO,
4097 "SGE too many priority ingress contexts", -1, 0 },
4098 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
4099 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
4100 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
4101 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
4102 "SGE PCIe error for a DBP thread", -1, 0 },
/* Causes that only exist on T4/T5 (doorbell drop/full, egress ctxt prio). */
4106 static const struct intr_info t4t5_sge_intr_info[] = {
4107 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
4108 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
4109 { F_ERR_EGR_CTXT_PRIO,
4110 "SGE too many priority egress contexts", -1, 0 },
4115 * For now, treat below interrupts as fatal so that we disable SGE and
4116 * get better debug */
4117 static const struct intr_info t6_sge_intr_info[] = {
4119 "SGE Actual WRE packet is less than advertized length",
/* Combine the two 32-bit parity cause registers into one 64-bit value. */
4124 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4125 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4127 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4128 (unsigned long long)v);
4129 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4130 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
/* Decode CAUSE3: common table first, then the chip-specific table. */
4133 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4134 if (chip_id(adapter) <= CHELSIO_T5)
4135 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4136 t4t5_sge_intr_info);
4138 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
/* Report and clear any queue-specific error latched in SGE_ERROR_STATS. */
4141 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4142 if (err & F_ERROR_QID_VALID) {
4143 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4144 if (err & F_UNCAPTURED_ERROR)
4145 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4146 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4147 F_UNCAPTURED_ERROR);
4151 t4_fatal_err(adapter);
/* Aggregate masks of CIM OBQ / IBQ parity-error bits; used by the CIM
 * interrupt handler below to report all queue parity errors as one cause. */
4154 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
4155 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
4156 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
4157 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4160 * CIM interrupt handler.
/*
 * CIM interrupt handler.  Checks A_PCIE_FW for a firmware error first (and
 * reports it), suppresses a TIMER0 interrupt that is not accompanied by a
 * firmware crash, then decodes the CIM host and uP-access cause registers;
 * fatal causes escalate via t4_fatal_err().
 * NOTE(review): listing is non-contiguous; some lines are elided.
 */
4162 static void cim_intr_handler(struct adapter *adapter)
4164 static const struct intr_info cim_intr_info[] = {
4165 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4166 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4167 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4168 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4169 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4170 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4171 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4172 { F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
4175 static const struct intr_info cim_upintr_info[] = {
4176 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4177 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4178 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4179 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4180 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4181 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4182 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4183 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4184 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4185 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4186 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4187 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4188 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4189 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4190 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4191 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4192 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4193 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4194 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4195 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4196 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4197 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4198 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4199 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4200 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4201 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4202 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4203 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* Report firmware-detected errors before decoding the CIM causes. */
4209 fw_err = t4_read_reg(adapter, A_PCIE_FW);
4210 if (fw_err & F_PCIE_FW_ERR)
4211 t4_report_fw_error(adapter);
4213 /* When the Firmware detects an internal error which normally wouldn't
4214 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4215 * to make sure the Host sees the Firmware Crash. So if we have a
4216 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4219 val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
4220 if (val & F_TIMER0INT)
4221 if (!(fw_err & F_PCIE_FW_ERR) ||
4222 (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
4223 t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
4226 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4228 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4231 t4_fatal_err(adapter);
4235 * ULP RX interrupt handler.
/*
 * ULP RX interrupt handler: decodes A_ULP_RX_INT_CAUSE (per-channel context
 * errors and parity) and escalates via t4_fatal_err() on any hit.
 */
4237 static void ulprx_intr_handler(struct adapter *adapter)
4239 static const struct intr_info ulprx_intr_info[] = {
4240 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4241 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4242 { 0x7fffff, "ULPRX parity error", -1, 1 },
4246 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4247 t4_fatal_err(adapter);
4251 * ULP TX interrupt handler.
/*
 * ULP TX interrupt handler: decodes A_ULP_TX_INT_CAUSE (per-channel PBL
 * out-of-bounds and parity errors) and escalates via t4_fatal_err().
 */
4253 static void ulptx_intr_handler(struct adapter *adapter)
4255 static const struct intr_info ulptx_intr_info[] = {
4256 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4258 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4260 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4262 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4264 { 0xfffffff, "ULPTX parity error", -1, 1 },
4268 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4269 t4_fatal_err(adapter);
4273 * PM TX interrupt handler.
/*
 * PM TX interrupt handler: decodes A_PM_TX_INT_CAUSE (oversized/zero-length
 * pcmds, framing and parity errors) and escalates via t4_fatal_err().
 */
4275 static void pmtx_intr_handler(struct adapter *adapter)
4277 static const struct intr_info pmtx_intr_info[] = {
4278 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4279 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4280 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4281 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4282 { 0xffffff0, "PMTX framing error", -1, 1 },
4283 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4284 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4286 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4287 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4291 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4292 t4_fatal_err(adapter);
4296 * PM RX interrupt handler.
/*
 * PM RX interrupt handler: decodes A_PM_RX_INT_CAUSE (zero-length pcmd,
 * framing and parity errors) and escalates via t4_fatal_err().
 */
4298 static void pmrx_intr_handler(struct adapter *adapter)
4300 static const struct intr_info pmrx_intr_info[] = {
4301 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4302 { 0x3ffff0, "PMRX framing error", -1, 1 },
4303 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4304 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4306 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4307 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4311 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4312 t4_fatal_err(adapter);
4316 * CPL switch interrupt handler.
/*
 * CPL switch interrupt handler: decodes A_CPL_INTR_CAUSE (CIM/TP/SGE framing
 * errors, overflow, no-switch) and escalates via t4_fatal_err().
 */
4318 static void cplsw_intr_handler(struct adapter *adapter)
4320 static const struct intr_info cplsw_intr_info[] = {
4321 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4322 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4323 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4324 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4325 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4326 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4330 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4331 t4_fatal_err(adapter);
4335 * LE interrupt handler.
/*
 * LE (Lookup Engine) interrupt handler: selects the T4/T5 or T6 cause table
 * based on chip revision and decodes A_LE_DB_INT_CAUSE.
 * NOTE(review): the action on a decoded fatal cause is elided in this
 * listing (line 4361+ missing).
 */
4337 static void le_intr_handler(struct adapter *adap)
4339 unsigned int chip_ver = chip_id(adap);
4340 static const struct intr_info le_intr_info[] = {
4341 { F_LIPMISS, "LE LIP miss", -1, 0 },
4342 { F_LIP0, "LE 0 LIP error", -1, 0 },
4343 { F_PARITYERR, "LE parity error", -1, 1 },
4344 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4345 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
/* T6 moved/renamed several of these cause bits; separate table needed. */
4349 static const struct intr_info t6_le_intr_info[] = {
4350 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4351 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4352 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4353 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4354 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4358 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4359 (chip_ver <= CHELSIO_T5) ?
4360 le_intr_info : t6_le_intr_info))
4365 * MPS interrupt handler.
/*
 * MPS interrupt handler.  Walks the per-sub-block cause registers (Rx/Tx
 * parity, trace, statistics SRAM/FIFOs, classifier), sums the fatal counts,
 * clears A_MPS_INT_CAUSE (with a read-back to flush the write), and
 * escalates via t4_fatal_err() if anything fatal was decoded.
 * NOTE(review): listing is non-contiguous; array terminators and the final
 * "if (fat)" condition are elided.
 */
4367 static void mps_intr_handler(struct adapter *adapter)
4369 static const struct intr_info mps_rx_intr_info[] = {
4370 { 0xffffff, "MPS Rx parity error", -1, 1 },
4373 static const struct intr_info mps_tx_intr_info[] = {
4374 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4375 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4376 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4378 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4380 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
4381 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4382 { F_FRMERR, "MPS Tx framing error", -1, 1 },
4385 static const struct intr_info mps_trc_intr_info[] = {
4386 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4387 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4389 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4392 static const struct intr_info mps_stat_sram_intr_info[] = {
4393 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4396 static const struct intr_info mps_stat_tx_intr_info[] = {
4397 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4400 static const struct intr_info mps_stat_rx_intr_info[] = {
4401 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4404 static const struct intr_info mps_cls_intr_info[] = {
4405 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4406 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4407 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum the fatal-cause counts from every MPS sub-block cause register. */
4413 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4415 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4417 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4418 mps_trc_intr_info) +
4419 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4420 mps_stat_sram_intr_info) +
4421 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4422 mps_stat_tx_intr_info) +
4423 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4424 mps_stat_rx_intr_info) +
4425 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4428 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4429 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
4431 t4_fatal_err(adapter);
/* Mask of memory-controller interrupt causes handled by mem_intr_handler()
 * below (the continuation of this #define is elided in this listing). */
4434 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
4438 * EDC/MC interrupt handler.
/*
 * EDC/MC (memory controller) interrupt handler for memory @idx.
 * Selects the per-controller cause/ECC-status registers, reports FIFO parity
 * and correctable/uncorrectable ECC errors, clears the causes, and treats
 * parity or uncorrectable ECC as fatal.
 * NOTE(review): assumes @idx is one of MEM_EDC0/MEM_EDC1/MEM_MC/MEM_MC1
 * (indexes the local name[] table) -- confirm at call sites.
 */
4440 static void mem_intr_handler(struct adapter *adapter, int idx)
4442 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4444 unsigned int addr, cnt_addr, v;
4446 if (idx <= MEM_EDC1) {
4447 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4448 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4449 } else if (idx == MEM_MC) {
4450 if (is_t4(adapter)) {
4451 addr = A_MC_INT_CAUSE;
4452 cnt_addr = A_MC_ECC_STATUS;
4454 addr = A_MC_P_INT_CAUSE;
4455 cnt_addr = A_MC_P_ECC_STATUS;
4458 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4459 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4462 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4463 if (v & F_PERR_INT_CAUSE)
4464 CH_ALERT(adapter, "%s FIFO parity error\n",
4466 if (v & F_ECC_CE_INT_CAUSE) {
4467 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4469 if (idx <= MEM_EDC1)
4470 t4_edc_err_read(adapter, idx);
/* Clear the correctable-ECC counter after reporting it. */
4472 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4473 CH_WARN_RATELIMIT(adapter,
4474 "%u %s correctable ECC data error%s\n",
4475 cnt, name[idx], cnt > 1 ? "s" : "");
4477 if (v & F_ECC_UE_INT_CAUSE)
4479 "%s uncorrectable ECC data error\n", name[idx]);
4481 t4_write_reg(adapter, addr, v);
4482 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4483 t4_fatal_err(adapter);
4487 * MA interrupt handler.
/*
 * MA interrupt handler: reports parity errors (with the parity status
 * registers) and address wrap-around errors, clears A_MA_INT_CAUSE, and
 * always escalates via t4_fatal_err().
 */
4489 static void ma_intr_handler(struct adapter *adapter)
4491 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4493 if (status & F_MEM_PERR_INT_CAUSE) {
4495 "MA parity error, parity status %#x\n",
4496 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4499 "MA parity error, parity status %#x\n",
4500 t4_read_reg(adapter,
4501 A_MA_PARITY_ERROR_STATUS2));
4503 if (status & F_MEM_WRAP_INT_CAUSE) {
4504 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4505 CH_ALERT(adapter, "MA address wrap-around error by "
4506 "client %u to address %#x\n",
4507 G_MEM_WRAP_CLIENT_NUM(v),
4508 G_MEM_WRAP_ADDRESS(v) << 4);
4510 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4511 t4_fatal_err(adapter);
4515 * SMB interrupt handler.
/*
 * SMB interrupt handler: decodes A_SMB_INT_CAUSE (master/slave FIFO parity
 * errors).  NOTE(review): the action taken on a decoded cause is elided in
 * this listing.
 */
4517 static void smb_intr_handler(struct adapter *adap)
4519 static const struct intr_info smb_intr_info[] = {
4520 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4521 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4522 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4526 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4531 * NC-SI interrupt handler.
/*
 * NC-SI interrupt handler: decodes A_NCSI_INT_CAUSE (CIM/MPS/FIFO parity
 * errors).  NOTE(review): the action taken on a decoded cause is elided in
 * this listing.
 */
4533 static void ncsi_intr_handler(struct adapter *adap)
4535 static const struct intr_info ncsi_intr_info[] = {
4536 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4537 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4538 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4539 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4543 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4548 * XGMAC interrupt handler.
/*
 * XGMAC interrupt handler for @port: reads the per-port MAC cause register
 * (address differs between T4 and later chips), reports Tx/Rx FIFO parity
 * errors, and writes the handled bits back to clear them.
 */
4550 static void xgmac_intr_handler(struct adapter *adap, int port)
4552 u32 v, int_cause_reg;
4555 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4557 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4559 v = t4_read_reg(adap, int_cause_reg);
/* Only the two FIFO parity causes are handled here. */
4561 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4565 if (v & F_TXFIFO_PRTY_ERR)
4566 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4568 if (v & F_RXFIFO_PRTY_ERR)
4569 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4571 t4_write_reg(adap, int_cause_reg, v);
4576 * PL interrupt handler.
/*
 * PL interrupt handler: decodes A_PL_PL_INT_CAUSE using the T4 table
 * (which includes the VFID_MAP parity cause) or the reduced T5+ table.
 * NOTE(review): the action on a decoded cause is elided in this listing.
 */
4578 static void pl_intr_handler(struct adapter *adap)
4580 static const struct intr_info pl_intr_info[] = {
4581 { F_FATALPERR, "Fatal parity error", -1, 1 },
4582 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4586 static const struct intr_info t5_pl_intr_info[] = {
4587 { F_FATALPERR, "Fatal parity error", -1, 1 },
4591 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4593 pl_intr_info : t5_pl_intr_info))
/* PF-local interrupt causes enabled via A_PL_PF_INT_ENABLE (see
 * t4_intr_enable/t4_intr_disable below). */
4597 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
4600 * t4_slow_intr_handler - control path interrupt handler
4601 * @adapter: the adapter
4603 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4604 * The designation 'slow' is because it involves register reads, while
4605 * data interrupts typically don't involve any MMIOs.
/* Dispatch each asserted bit of A_PL_INT_CAUSE to its module handler, then
 * clear the causes this function owns (GLBL_INTR_MASK) with a flushing
 * read-back.  Returns early if no global cause is pending.
 * NOTE(review): listing is non-contiguous; the per-bit "if (cause & ...)"
 * guards for several dispatches are elided. */
4607 int t4_slow_intr_handler(struct adapter *adapter)
4609 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4611 if (!(cause & GLBL_INTR_MASK))
4614 cim_intr_handler(adapter);
4616 mps_intr_handler(adapter);
4618 ncsi_intr_handler(adapter);
4620 pl_intr_handler(adapter);
4622 smb_intr_handler(adapter);
4624 xgmac_intr_handler(adapter, 0);
4626 xgmac_intr_handler(adapter, 1);
4628 xgmac_intr_handler(adapter, 2);
4630 xgmac_intr_handler(adapter, 3);
4632 pcie_intr_handler(adapter);
4634 mem_intr_handler(adapter, MEM_MC);
4635 if (is_t5(adapter) && (cause & F_MC1))
4636 mem_intr_handler(adapter, MEM_MC1);
4638 mem_intr_handler(adapter, MEM_EDC0);
4640 mem_intr_handler(adapter, MEM_EDC1);
4642 le_intr_handler(adapter);
4644 tp_intr_handler(adapter);
4646 ma_intr_handler(adapter);
4647 if (cause & F_PM_TX)
4648 pmtx_intr_handler(adapter);
4649 if (cause & F_PM_RX)
4650 pmrx_intr_handler(adapter);
4651 if (cause & F_ULP_RX)
4652 ulprx_intr_handler(adapter);
4653 if (cause & F_CPL_SWITCH)
4654 cplsw_intr_handler(adapter);
4656 sge_intr_handler(adapter);
4657 if (cause & F_ULP_TX)
4658 ulptx_intr_handler(adapter);
4660 /* Clear the interrupts just processed for which we are the master. */
4661 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4662 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4667 * t4_intr_enable - enable interrupts
4668 * @adapter: the adapter whose interrupts should be enabled
4670 * Enable PF-specific interrupts for the calling function and the top-level
4671 * interrupt concentrator for global interrupts. Interrupts are already
4672 * enabled at each module, here we just enable the roots of the interrupt
4675 * Note: this function should be called only when the driver manages
4676 * non PF-specific interrupts from the various HW modules. Only one PCI
4677 * function at a time should be doing this.
/* Enables the root interrupt concentrators: per-chip SGE cause bits, the
 * PF-local causes (PF_INTR_MASK), and this PF's bit in A_PL_INT_MAP0.
 * The PF number is decoded from A_PL_WHOAMI (field layout differs on T6). */
4679 void t4_intr_enable(struct adapter *adapter)
4682 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4683 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4684 ? G_SOURCEPF(whoami)
4685 : G_T6_SOURCEPF(whoami));
/* Chip-specific extra SGE causes: doorbell bits on T4/T5, PCIe/WRE on T6. */
4687 if (chip_id(adapter) <= CHELSIO_T5)
4688 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4690 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4691 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4692 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4693 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4694 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4695 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4696 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4697 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4698 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4699 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4703 * t4_intr_disable - disable interrupts
4704 * @adapter: the adapter whose interrupts should be disabled
4706 * Disable interrupts. We only disable the top-level interrupt
4707 * concentrators. The caller must be a PCI function managing global
/* Disables the root concentrators only: clears the PF-local enable register
 * and this PF's bit in A_PL_INT_MAP0 (mirror image of t4_intr_enable). */
4710 void t4_intr_disable(struct adapter *adapter)
4712 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4713 u32 pf = (chip_id(adapter) <= CHELSIO_T5
4714 ? G_SOURCEPF(whoami)
4715 : G_T6_SOURCEPF(whoami));
4717 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4718 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4722 * t4_intr_clear - clear all interrupts
4723 * @adapter: the adapter whose interrupts should be cleared
4725 * Clears all interrupts. The caller must be a PCI function managing
4726 * global interrupts.
/* Write-1-to-clear every listed module cause register, handle the
 * chip-dependent registers (MC cause, T4-only UTL status, T5+ parity
 * status 2), then clear the global PL causes with a flushing read-back.
 * NOTE(review): listing is non-contiguous; some cause_reg[] entries and
 * chip-check conditions are elided. */
4728 void t4_intr_clear(struct adapter *adapter)
4730 static const unsigned int cause_reg[] = {
4731 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4732 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4733 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4734 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4735 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4736 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4738 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4739 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4740 A_MPS_RX_PERR_INT_CAUSE,
4742 MYPF_REG(A_PL_PF_INT_CAUSE),
4749 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4750 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4752 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4753 A_MC_P_INT_CAUSE, 0xffffffff);
4755 if (is_t4(adapter)) {
4756 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4758 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4761 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4763 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4764 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4768 * hash_mac_addr - return the hash value of a MAC address
4769 * @addr: the 48-bit Ethernet MAC address
4771 * Hashes a MAC address according to the hash function used by HW inexact
4772 * (hash) address matching.
/* Packs the 48-bit MAC into two 24-bit words (a = bytes 0..2, b = bytes
 * 3..5) as the first step of the HW inexact-match hash.
 * NOTE(review): the folding/return of the hash is elided in this listing. */
4774 static int hash_mac_addr(const u8 *addr)
4776 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4777 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4785 * t4_config_rss_range - configure a portion of the RSS mapping table
4786 * @adapter: the adapter
4787 * @mbox: mbox to use for the FW command
4788 * @viid: virtual interface whose RSS subtable is to be written
4789 * @start: start entry in the table to write
4790 * @n: how many table entries to write
4791 * @rspq: values for the "response queue" (Ingress Queue) lookup table
4792 * @nrspq: number of values in @rspq
4794 * Programs the selected part of the VI's RSS mapping table with the
4795 * provided values. If @nrspq < @n the supplied values are used repeatedly
4796 * until the full table range is populated.
4798 * The caller must ensure the values in @rspq are in the range allowed for
/* Builds FW_RSS_IND_TBL_CMDs in chunks of up to 32 IQ IDs (packed 3 per
 * 32-bit word as 10-bit values) and sends each chunk via the mailbox,
 * bailing out on the first mailbox error.
 * NOTE(review): listing is non-contiguous; loop headers and the wrap-around
 * handling of the rspq array are partially elided. */
4801 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4802 int start, int n, const u16 *rspq, unsigned int nrspq)
4805 const u16 *rsp = rspq;
4806 const u16 *rsp_end = rspq + nrspq;
4807 struct fw_rss_ind_tbl_cmd cmd;
4809 memset(&cmd, 0, sizeof(cmd));
4810 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4811 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4812 V_FW_RSS_IND_TBL_CMD_VIID(viid));
4813 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4816 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4817 * Queue Identifiers. These Ingress Queue IDs are packed three to
4818 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4822 int nq = min(n, 32);
4824 __be32 *qp = &cmd.iq0_to_iq2;
4827 * Set up the firmware RSS command header to send the next
4828 * "nq" Ingress Queue IDs to the firmware.
4830 cmd.niqid = cpu_to_be16(nq);
4831 cmd.startidx = cpu_to_be16(start);
4834 * "nq" more done for the start of the next loop.
4840 * While there are still Ingress Queue IDs to stuff into the
4841 * current firmware RSS command, retrieve them from the
4842 * Ingress Queue ID array and insert them into the command.
4846 * Grab up to the next 3 Ingress Queue IDs (wrapping
4847 * around the Ingress Queue ID array if necessary) and
4848 * insert them into the firmware RSS command at the
4849 * current 3-tuple position within the commad.
4853 int nqbuf = min(3, nq);
4856 qbuf[0] = qbuf[1] = qbuf[2] = 0;
4857 while (nqbuf && nq_packed < 32) {
4864 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4865 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4866 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4870 * Send this portion of the RRS table update to the firmware;
4871 * bail out on any errors.
4873 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4881 * t4_config_glbl_rss - configure the global RSS mode
4882 * @adapter: the adapter
4883 * @mbox: mbox to use for the FW command
4884 * @mode: global RSS mode
4885 * @flags: mode-specific flags
4887 * Sets the global RSS mode.
/* Builds a FW_RSS_GLB_CONFIG_CMD for the MANUAL or BASICVIRTUAL mode
 * (BASICVIRTUAL additionally carries the mode-specific @flags) and issues
 * it over the mailbox; returns the mailbox status. */
4889 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4892 struct fw_rss_glb_config_cmd c;
4894 memset(&c, 0, sizeof(c));
4895 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4896 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4897 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4898 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4899 c.u.manual.mode_pkd =
4900 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4901 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4902 c.u.basicvirtual.mode_keymode =
4903 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4904 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4907 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4911 * t4_config_vi_rss - configure per VI RSS settings
4912 * @adapter: the adapter
4913 * @mbox: mbox to use for the FW command
4916 * @defq: id of the default RSS queue for the VI.
4917 * @skeyidx: RSS secret key table index for non-global mode
4918 * @skey: RSS vf_scramble key for VI.
4920 * Configures VI-specific RSS properties.
/* Builds a FW_RSS_VI_CONFIG_CMD carrying @flags, the default queue, the
 * secret-key table index and the scramble key for VI @viid, and issues it
 * over the mailbox; returns the mailbox status. */
4922 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4923 unsigned int flags, unsigned int defq, unsigned int skeyidx,
4926 struct fw_rss_vi_config_cmd c;
4928 memset(&c, 0, sizeof(c));
4929 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4930 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4931 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4932 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4933 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4934 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4935 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4936 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4937 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4939 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4942 /* Read an RSS table row: trigger a lookup-table read for @row, then poll
4942  * A_TP_RSS_LKP_TABLE until F_LKPTBLROWVLD is set and return the value via
4942  * @val (through t4_wait_op_done_val). */
4943 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4945 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
4946 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
4951 * t4_read_rss - read the contents of the RSS mapping table
4952 * @adapter: the adapter
4953 * @map: holds the contents of the RSS mapping table
4955 * Reads the contents of the RSS hash->queue mapping table.
/* Reads the RSS mapping table two entries per row via rd_rss_row(),
 * unpacking each row into consecutive @map slots; stops on the first
 * row-read error. */
4957 int t4_read_rss(struct adapter *adapter, u16 *map)
4962 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4963 ret = rd_rss_row(adapter, i, &val);
4966 *map++ = G_LKPTBLQUEUE0(val);
4967 *map++ = G_LKPTBLQUEUE1(val);
4973 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
4974 * @adap: the adapter
4975 * @cmd: TP fw ldst address space type
4976 * @vals: where the indirect register values are stored/written
4977 * @nregs: how many indirect registers to read/write
4978 * @start_idx: index of first indirect register to read/write
4979 * @rw: Read (1) or Write (0)
4980 * @sleep_ok: if true we may sleep while awaiting command completion
4982 * Access TP indirect registers through LDST
/* Issues one FW_LDST_CMD mailbox command per register: for reads (@rw != 0)
 * the returned value is stored into vals[i]; for writes vals[i] is sent.
 * NOTE(review): the error-exit path of the loop is elided in this listing. */
4984 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
4985 unsigned int nregs, unsigned int start_index,
4986 unsigned int rw, bool sleep_ok)
4990 struct fw_ldst_cmd c;
4992 for (i = 0; i < nregs; i++) {
4993 memset(&c, 0, sizeof(c));
4994 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4996 (rw ? F_FW_CMD_READ :
4998 V_FW_LDST_CMD_ADDRSPACE(cmd));
4999 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5001 c.u.addrval.addr = cpu_to_be32(start_index + i);
5002 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5003 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5009 vals[i] = be32_to_cpu(c.u.addrval.val);
5015 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5016 * @adap: the adapter
5017 * @reg_addr: Address Register
5018 * @reg_data: Data register
5019 * @buff: where the indirect register values are stored/written
5020 * @nregs: how many indirect registers to read/write
5021 * @start_index: index of first indirect register to read/write
5022 * @rw: READ(1) or WRITE(0)
5023 * @sleep_ok: if true we may sleep while awaiting command completion
5025 * Read/Write TP indirect registers through LDST if possible.
5026 * Else, use backdoor access
/* Maps @reg_addr to the matching FW_LDST address space (TP_PIO / TP_TM_PIO /
 * TP_MIB) and goes through t4_tp_fw_ldst_rw() when LDST is usable; any other
 * address, or an LDST failure, falls back to direct indirect-register access.
 * NOTE(review): listing is non-contiguous; the switch/label scaffolding is
 * partially elided. */
5028 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5029 u32 *buff, u32 nregs, u32 start_index, int rw,
5037 cmd = FW_LDST_ADDRSPC_TP_PIO;
5039 case A_TP_TM_PIO_ADDR:
5040 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5042 case A_TP_MIB_INDEX:
5043 cmd = FW_LDST_ADDRSPC_TP_MIB;
5046 goto indirect_access;
5049 if (t4_use_ldst(adap))
5050 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5057 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5060 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5066 * t4_tp_pio_read - Read TP PIO registers
5067 * @adap: the adapter
5068 * @buff: where the indirect register values are written
5069 * @nregs: how many indirect registers to read
5070 * @start_index: index of first indirect register to read
5071 * @sleep_ok: if true we may sleep while awaiting command completion
5073 * Read TP PIO Registers
/* Thin wrapper: reads @nregs TP PIO registers starting at @start_index
 * via t4_tp_indirect_rw() (rw = 1 selects read). */
5075 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5076 u32 start_index, bool sleep_ok)
5078 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5079 start_index, 1, sleep_ok);
5083 * t4_tp_pio_write - Write TP PIO registers
5084 * @adap: the adapter
5085 * @buff: where the indirect register values are stored
5086 * @nregs: how many indirect registers to write
5087 * @start_index: index of first indirect register to write
5088 * @sleep_ok: if true we may sleep while awaiting command completion
5090 * Write TP PIO Registers
/* Thin wrapper: writes @nregs TP PIO registers via t4_tp_indirect_rw()
 * (rw = 0 selects write; __DECONST drops const for the shared helper). */
5092 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5093 u32 start_index, bool sleep_ok)
5095 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5096 __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
5100 * t4_tp_tm_pio_read - Read TP TM PIO registers
5101 * @adap: the adapter
5102 * @buff: where the indirect register values are written
5103 * @nregs: how many indirect registers to read
5104 * @start_index: index of first indirect register to read
5105 * @sleep_ok: if true we may sleep while awaiting command completion
5107 * Read TP TM PIO Registers
5109 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5110 u32 start_index, bool sleep_ok)
5112 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5113 nregs, start_index, 1, sleep_ok);
5117 * t4_tp_mib_read - Read TP MIB registers
5118 * @adap: the adapter
5119 * @buff: where the indirect register values are written
5120 * @nregs: how many indirect registers to read
5121 * @start_index: index of first indirect register to read
5122 * @sleep_ok: if true we may sleep while awaiting command completion
5124 * Read TP MIB Registers
5126 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5129 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5130 start_index, 1, sleep_ok);
5134 * t4_read_rss_key - read the global RSS key
5135 * @adap: the adapter
5136 * @key: 10-entry array holding the 320-bit RSS key
5137 * @sleep_ok: if true we may sleep while awaiting command completion
5139 * Reads the global 320-bit RSS key.
5141 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5143 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5147 * t4_write_rss_key - program one of the RSS keys
5148 * @adap: the adapter
5149 * @key: 10-entry array holding the 320-bit RSS key
5150 * @idx: which RSS key to write
5151 * @sleep_ok: if true we may sleep while awaiting command completion
5153 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5154 * 0..15 the corresponding entry in the RSS key table is written,
5155 * otherwise the global RSS key is written.
5157 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5160 u8 rss_key_addr_cnt = 16;
5161 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5164 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5165 * allows access to key addresses 16-63 by using KeyWrAddrX
5166 * as index[5:4](upper 2) into key table
5168 if ((chip_id(adap) > CHELSIO_T5) &&
5169 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5170 rss_key_addr_cnt = 32;
/* Program the key itself, then latch it into the per-index table below. */
5172 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5174 if (idx >= 0 && idx < rss_key_addr_cnt) {
5175 if (rss_key_addr_cnt > 16)
5176 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5177 vrt | V_KEYWRADDRX(idx >> 4) |
5178 V_T6_VFWRADDR(idx) | F_KEYWREN);
5180 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5181 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5186 * t4_read_rss_pf_config - read PF RSS Configuration Table
5187 * @adapter: the adapter
5188 * @index: the entry in the PF RSS table to read
5189 * @valp: where to store the returned value
5190 * @sleep_ok: if true we may sleep while awaiting command completion
5192 * Reads the PF RSS Configuration Table at the specified index and returns
5193 * the value found there.
5195 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5196 u32 *valp, bool sleep_ok)
5198 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5202 * t4_write_rss_pf_config - write PF RSS Configuration Table
5203 * @adapter: the adapter
5204 * @index: the entry in the PF RSS table to write
5205 * @val: the value to store
5206 * @sleep_ok: if true we may sleep while awaiting command completion
5208 * Writes the PF RSS Configuration Table at the specified index with the
5211 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5212 u32 val, bool sleep_ok)
5214 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5219 * t4_read_rss_vf_config - read VF RSS Configuration Table
5220 * @adapter: the adapter
5221 * @index: the entry in the VF RSS table to read
5222 * @vfl: where to store the returned VFL
5223 * @vfh: where to store the returned VFH
5224 * @sleep_ok: if true we may sleep while awaiting command completion
5226 * Reads the VF RSS Configuration Table at the specified index and returns
5227 * the (VFL, VFH) values found there.
5229 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5230 u32 *vfl, u32 *vfh, bool sleep_ok)
5232 u32 vrt, mask, data;
/* T4/T5 and T6+ place the VF write-address field in different bits. */
5234 if (chip_id(adapter) <= CHELSIO_T5) {
5235 mask = V_VFWRADDR(M_VFWRADDR);
5236 data = V_VFWRADDR(index);
5238 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5239 data = V_T6_VFWRADDR(index);
5242 * Request that the index'th VF Table values be read into VFL/VFH.
5244 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5245 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5246 vrt |= data | F_VFRDEN;
5247 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5250 * Grab the VFL/VFH values ...
5252 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5253 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5257 * t4_write_rss_vf_config - write VF RSS Configuration Table
5259 * @adapter: the adapter
5260 * @index: the entry in the VF RSS table to write
5261 * @vfl: the VFL to store
5262 * @vfh: the VFH to store
 * @sleep_ok: if true we may sleep while awaiting command completion
5264 * Writes the VF RSS Configuration Table at the specified index with the
5265 * specified (VFL, VFH) values.
5267 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5268 u32 vfl, u32 vfh, bool sleep_ok)
5270 u32 vrt, mask, data;
/* Same chip-dependent VF address field selection as the read path above. */
5272 if (chip_id(adapter) <= CHELSIO_T5) {
5273 mask = V_VFWRADDR(M_VFWRADDR);
5274 data = V_VFWRADDR(index);
5276 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5277 data = V_T6_VFWRADDR(index);
5281 * Load up VFL/VFH with the values to be written ...
5283 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5284 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5287 * Write the VFL/VFH into the VF Table at index'th location.
5289 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5290 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5291 vrt |= data | F_VFRDEN;
5292 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5296 * t4_read_rss_pf_map - read PF RSS Map
5297 * @adapter: the adapter
5298 * @sleep_ok: if true we may sleep while awaiting command completion
5300 * Reads the PF RSS Map register and returns its value.
5302 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5306 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5312 * t4_write_rss_pf_map - write PF RSS Map
5313 * @adapter: the adapter
5314 * @pfmap: PF RSS Map value
 * @sleep_ok: if true we may sleep while awaiting command completion
5316 * Writes the specified value to the PF RSS Map register.
5318 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5320 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5324 * t4_read_rss_pf_mask - read PF RSS Mask
5325 * @adapter: the adapter
5326 * @sleep_ok: if true we may sleep while awaiting command completion
5328 * Reads the PF RSS Mask register and returns its value.
5330 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5334 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5340 * t4_write_rss_pf_mask - write PF RSS Mask
5341 * @adapter: the adapter
5342 * @pfmask: PF RSS Mask value
 * @sleep_ok: if true we may sleep while awaiting command completion
5344 * Writes the specified value to the PF RSS Mask register.
5346 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
5348 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5352 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5353 * @adap: the adapter
5354 * @v4: holds the TCP/IP counter values
5355 * @v6: holds the TCP/IPv6 counter values
5356 * @sleep_ok: if true we may sleep while awaiting command completion
5358 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5359 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5361 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5362 struct tp_tcp_stats *v6, bool sleep_ok)
/* Scratch buffer large enough for the contiguous OUT_RST..RXT_SEG_LO run. */
5364 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
/* Index helpers: offset of a MIB register within the val[] snapshot. */
5366 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5367 #define STAT(x) val[STAT_IDX(x)]
5368 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5371 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5372 A_TP_MIB_TCP_OUT_RST, sleep_ok);
5373 v4->tcp_out_rsts = STAT(OUT_RST);
5374 v4->tcp_in_segs = STAT64(IN_SEG);
5375 v4->tcp_out_segs = STAT64(OUT_SEG);
5376 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Same layout holds for the IPv6 counters, just a different base index. */
5379 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5380 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5381 v6->tcp_out_rsts = STAT(OUT_RST);
5382 v6->tcp_in_segs = STAT64(IN_SEG);
5383 v6->tcp_out_segs = STAT64(OUT_SEG);
5384 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5392 * t4_tp_get_err_stats - read TP's error MIB counters
5393 * @adap: the adapter
5394 * @st: holds the counter values
5395 * @sleep_ok: if true we may sleep while awaiting command completion
5397 * Returns the values of TP's error counters.
5399 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5402 int nchan = adap->chip_params->nchan;
5404 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5407 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5410 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5413 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5414 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5416 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5417 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5419 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5422 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5423 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5425 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5426 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
5428 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
5433 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
5434 * @adap: the adapter
5435 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
5437 * Returns the values of TP's proxy counters.
5439 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
5442 int nchan = adap->chip_params->nchan;
5444 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
5448 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5449 * @adap: the adapter
5450 * @st: holds the counter values
5451 * @sleep_ok: if true we may sleep while awaiting command completion
5453 * Returns the values of TP's CPL counters.
5455 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5458 int nchan = adap->chip_params->nchan;
5460 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
5462 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
5466 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5467 * @adap: the adapter
5468 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
5470 * Returns the values of TP's RDMA counters.
5472 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5475 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
5480 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5481 * @adap: the adapter
5482 * @idx: the port index
5483 * @st: holds the counter values
5484 * @sleep_ok: if true we may sleep while awaiting command completion
5486 * Returns the values of TP's FCoE counters for the selected port.
5488 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5489 struct tp_fcoe_stats *st, bool sleep_ok)
5493 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
5496 t4_tp_mib_read(adap, &st->frames_drop, 1,
5497 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
/* Byte counter is split HI/LO across two consecutive MIB registers. */
5499 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
5502 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5506 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5507 * @adap: the adapter
5508 * @st: holds the counter values
5509 * @sleep_ok: if true we may sleep while awaiting command completion
5511 * Returns the values of TP's counters for non-TCP directly-placed packets.
5513 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5518 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
5520 st->frames = val[0];
5522 st->octets = ((u64)val[2] << 32) | val[3];
5526 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5527 * @adap: the adapter
5528 * @mtus: where to store the MTU values
5529 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5531 * Reads the HW path MTU table.
5533 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5538 for (i = 0; i < NMTUS; ++i) {
/* V_MTUINDEX(0xff) selects read mode with auto-increment of the index. */
5539 t4_write_reg(adap, A_TP_MTU_TABLE,
5540 V_MTUINDEX(0xff) | V_MTUVALUE(i));
5541 v = t4_read_reg(adap, A_TP_MTU_TABLE);
5542 mtus[i] = G_MTUVALUE(v);
5544 mtu_log[i] = G_MTUWIDTH(v);
5549 * t4_read_cong_tbl - reads the congestion control table
5550 * @adap: the adapter
5551 * @incr: where to store the alpha values
5553 * Reads the additive increments programmed into the HW congestion
5556 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5558 unsigned int mtu, w;
5560 for (mtu = 0; mtu < NMTUS; ++mtu)
5561 for (w = 0; w < NCCTRL_WIN; ++w) {
5562 t4_write_reg(adap, A_TP_CCTRL_TABLE,
5563 V_ROWINDEX(0xffff) | (mtu << 5) | w);
/* Low 13 bits of the readback are the additive increment. */
5564 incr[mtu][w] = (u16)t4_read_reg(adap,
5565 A_TP_CCTRL_TABLE) & 0x1fff;
5570 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5571 * @adap: the adapter
5572 * @addr: the indirect TP register address
5573 * @mask: specifies the field within the register to modify
5574 * @val: new value for the field
5576 * Sets a field of an indirect TP register to the given value.
5578 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5579 unsigned int mask, unsigned int val)
/* Read-modify-write through the TP_PIO backdoor (not LDST). */
5581 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5582 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5583 t4_write_reg(adap, A_TP_PIO_DATA, val);
5587 * init_cong_ctrl - initialize congestion control parameters
5588 * @a: the alpha values for congestion control
5589 * @b: the beta values for congestion control
5591 * Initialize the congestion control parameters.
5593 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): entries a[9..] are elided from this chunk (orig 5596-5619). */
5595 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5620 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5623 b[13] = b[14] = b[15] = b[16] = 3;
5624 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5625 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5630 /* The minimum additive increment value for the congestion control table */
5631 #define CC_MIN_INCR 2U
5634 * t4_load_mtus - write the MTU and congestion control HW tables
5635 * @adap: the adapter
5636 * @mtus: the values for the MTU table
5637 * @alpha: the values for the congestion control alpha parameter
5638 * @beta: the values for the congestion control beta parameter
5640 * Write the HW MTU table with the supplied MTUs and the high-speed
5641 * congestion control table with the supplied alpha, beta, and MTUs.
5642 * We write the two tables together because the additive increments
5643 * depend on the MTUs.
5645 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5646 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion window, indexed by window. */
5648 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5649 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5650 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5651 28672, 40960, 57344, 81920, 114688, 163840, 229376
5656 for (i = 0; i < NMTUS; ++i) {
5657 unsigned int mtu = mtus[i];
5658 unsigned int log2 = fls(mtu);
5660 if (!(mtu & ((1 << log2) >> 2))) /* round */
5662 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5663 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5665 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment scales with (mtu - 40) ... clamped below at CC_MIN_INCR. */
5668 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5671 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5672 (w << 16) | (beta[w] << 13) | inc);
5678 * t4_set_pace_tbl - set the pace table
5679 * @adap: the adapter
5680 * @pace_vals: the pace values in microseconds
5681 * @start: index of the first entry in the HW pace table to set
5682 * @n: how many entries to set
5684 * Sets (a subset of the) HW pace table.
5686 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5687 unsigned int start, unsigned int n)
5689 unsigned int vals[NTX_SCHED], i;
5690 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5695 /* convert values from us to dack ticks, rounding to closest value */
5696 for (i = 0; i < n; i++, pace_vals++) {
5697 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
/* 0x7ff is the hardware limit for a pace table entry. */
5698 if (vals[i] > 0x7ff)
/* A nonzero request must not silently round down to "no pacing". */
5700 if (*pace_vals && vals[i] == 0)
5703 for (i = 0; i < n; i++, start++)
5704 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5709 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5710 * @adap: the adapter
5711 * @kbps: target rate in Kbps
5712 * @sched: the scheduler index
5714 * Configure a Tx HW scheduler for the target rate.
5716 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5718 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5719 unsigned int clk = adap->params.vpd.cclk * 1000;
5720 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustively search (clocks/tick, bytes/tick) pairs for the best fit. */
5723 kbps *= 125; /* -> bytes */
5724 for (cpt = 1; cpt <= 255; cpt++) {
5726 bpt = (kbps + tps / 2) / tps;
5727 if (bpt > 0 && bpt <= 255) {
5729 delta = v >= kbps ? v - kbps : kbps - v;
5730 if (delta < mindelta) {
5735 } else if (selected_cpt)
/* Two schedulers share each rate-limit register; sched/2 picks it. */
5741 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5742 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5743 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5745 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5747 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5748 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5753 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5754 * @adap: the adapter
5755 * @sched: the scheduler index
5756 * @ipg: the interpacket delay in tenths of nanoseconds
5758 * Set the interpacket delay for a HW packet rate scheduler.
5760 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5762 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5764 /* convert ipg to nearest number of core clocks */
5765 ipg *= core_ticks_per_usec(adap);
5766 ipg = (ipg + 5000) / 10000;
5767 if (ipg > M_TXTIMERSEPQ0)
5770 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5771 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
/* Odd schedulers live in the Q1 field, even ones in Q0. */
5773 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5775 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5776 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Read back to flush the posted write. */
5777 t4_read_reg(adap, A_TP_TM_PIO_DATA);
5782 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5783 * clocks. The formula is
5785 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5787 * which is equivalent to
5789 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5791 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5793 u64 v = bytes256 * adap->params.vpd.cclk;
/* v * 62.5 computed in integer arithmetic as v*62 + v/2. */
5795 return v * 62 + v / 2;
5799 * t4_get_chan_txrate - get the current per channel Tx rates
5800 * @adap: the adapter
5801 * @nic_rate: rates for NIC traffic
5802 * @ofld_rate: rates for offloaded traffic
5804 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5807 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5811 v = t4_read_reg(adap, A_TP_TX_TRATE);
5812 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5813 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
/* Channels 2/3 exist only on 4-channel parts. */
5814 if (adap->chip_params->nchan > 2) {
5815 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5816 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5819 v = t4_read_reg(adap, A_TP_TX_ORATE);
5820 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5821 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5822 if (adap->chip_params->nchan > 2) {
5823 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5824 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5829 * t4_set_trace_filter - configure one of the tracing filters
5830 * @adap: the adapter
5831 * @tp: the desired trace filter parameters
5832 * @idx: which filter to configure
5833 * @enable: whether to enable or disable the filter
5835 * Configures one of the tracing filters available in HW. If @tp is %NULL
5836 * it indicates that the filter is already written in the register and it
5837 * just needs to be enabled or disabled.
5839 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5840 int idx, int enable)
5842 int i, ofst = idx * 4;
5843 u32 data_reg, mask_reg, cfg;
5844 u32 multitrc = F_TRCMULTIFILTER;
/* The filter-enable bit moved between T4 and T5+. */
5845 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5847 if (idx < 0 || idx >= NTRACE)
/* Enable/disable-only path: no parameters to program. */
5850 if (tp == NULL || !enable) {
5851 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5857 * TODO - After T4 data book is updated, specify the exact
5860 * See T4 data book - MPS section for a complete description
5861 * of the below if..else handling of A_MPS_TRC_CFG register
5864 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5865 if (cfg & F_TRCMULTIFILTER) {
5867 * If multiple tracers are enabled, then maximum
5868 * capture size is 2.5KB (FIFO size of a single channel)
5869 * minus 2 flits for CPL_TRACE_PKT header.
5871 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5875 * If multiple tracers are disabled, to avoid deadlocks
5876 * maximum packet capture size of 9600 bytes is recommended.
5877 * Also in this mode, only trace0 can be enabled and running.
5880 if (tp->snap_len > 9600 || idx)
/* Range-check all remaining user-supplied filter parameters. */
5884 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5885 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5886 tp->min_len > M_TFMINPKTSIZE)
5889 /* stop the tracer we'll be changing */
5890 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5892 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5893 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5894 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
/* HW stores the don't-care mask inverted relative to tp->mask. */
5896 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5897 t4_write_reg(adap, data_reg, tp->data[i]);
5898 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5900 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5901 V_TFCAPTUREMAX(tp->snap_len) |
5902 V_TFMINPKTSIZE(tp->min_len));
5903 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5904 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5906 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5907 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5913 * t4_get_trace_filter - query one of the tracing filters
5914 * @adap: the adapter
5915 * @tp: the current trace filter parameters
5916 * @idx: which trace filter to query
5917 * @enabled: non-zero if the filter is enabled
5919 * Returns the current settings of one of the HW tracing filters.
5921 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5925 int i, ofst = idx * 4;
5926 u32 data_reg, mask_reg;
5928 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5929 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
/* Decode chip-specific enable/port/invert field placement. */
5932 *enabled = !!(ctla & F_TFEN);
5933 tp->port = G_TFPORT(ctla);
5934 tp->invert = !!(ctla & F_TFINVERTMATCH);
5936 *enabled = !!(ctla & F_T5_TFEN);
5937 tp->port = G_T5_TFPORT(ctla);
5938 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5940 tp->snap_len = G_TFCAPTUREMAX(ctlb);
5941 tp->min_len = G_TFMINPKTSIZE(ctlb);
5942 tp->skip_ofst = G_TFOFFSET(ctla);
5943 tp->skip_len = G_TFLENGTH(ctla);
5945 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5946 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5947 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
/* Undo the inversion applied when the mask was programmed. */
5949 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5950 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5951 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5956 * t4_pmtx_get_stats - returns the HW stats from PMTX
5957 * @adap: the adapter
5958 * @cnt: where to store the count statistics
5959 * @cycles: where to store the cycle statistics
5961 * Returns performance statistics from PMTX.
5963 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5968 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5969 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5970 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
/* T4 exposes the cycle count as one 64-bit read; later chips need
 * an indirect two-register (MSB/LSB) access. */
5972 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5974 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5975 A_PM_TX_DBG_DATA, data, 2,
5976 A_PM_TX_DBG_STAT_MSB);
5977 cycles[i] = (((u64)data[0] << 32) | data[1]);
5983 * t4_pmrx_get_stats - returns the HW stats from PMRX
5984 * @adap: the adapter
5985 * @cnt: where to store the count statistics
5986 * @cycles: where to store the cycle statistics
5988 * Returns performance statistics from PMRX.
5990 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5995 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5996 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5997 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
/* Mirrors the PMTX T4-vs-later split above. */
5999 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6001 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6002 A_PM_RX_DBG_DATA, data, 2,
6003 A_PM_RX_DBG_STAT_MSB);
6004 cycles[i] = (((u64)data[0] << 32) | data[1]);
6010 * t4_get_mps_bg_map - return the buffer groups associated with a port
6011 * @adap: the adapter
6012 * @idx: the port index
6014 * Returns a bitmap indicating which MPS buffer groups are associated
6015 * with the given port. Bit i is set if buffer group i is used by the
6018 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
/* Prefer the firmware-provided map when one was negotiated. */
6022 if (adap->params.mps_bg_map)
6023 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6025 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL))
6027 return idx == 0 ? 0xf : 0;
6028 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6029 return idx < 2 ? (3 << (2 * idx)) : 0;
6034 * TP RX e-channels associated with the port.
6036 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6038 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6041 return idx == 0 ? 0xf : 0;
6042 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6043 return idx < 2 ? (3 << (2 * idx)) : 0;
6048 * t4_get_port_type_description - return Port Type string description
6049 * @port_type: firmware Port Type enumeration
6051 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* NOTE(review): the table entries (orig 6054-6077) are elided here. */
6053 static const char *const port_type_description[] = {
6078 if (port_type < ARRAY_SIZE(port_type_description))
6079 return port_type_description[port_type];
6084 * t4_get_port_stats_offset - collect port stats relative to a previous
6086 * @adap: The adapter
6088 * @stats: Current stats to fill
6089 * @offset: Previous stats snapshot
6091 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6092 struct port_stats *stats,
6093 struct port_stats *offset)
6098 t4_get_port_stats(adap, idx, stats);
/* Subtract the snapshot field-by-field; assumes port_stats is all u64s. */
6099 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6100 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6106 * t4_get_port_stats - collect port statistics
6107 * @adap: the adapter
6108 * @idx: the port index
6109 * @p: the stats structure to fill
6111 * Collect statistics related to the given port from HW.
6113 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6115 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6116 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
/* Per-port MPS stat registers moved between T4 and T5+ register maps. */
6118 #define GET_STAT(name) \
6119 t4_read_reg64(adap, \
6120 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6121 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6122 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6124 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6125 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6126 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6127 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6128 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6129 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6130 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6131 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6132 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6133 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6134 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6135 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6136 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6137 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6138 p->tx_drop = GET_STAT(TX_PORT_DROP);
6139 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6140 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6141 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6142 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6143 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6144 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6145 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6146 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* T5+ may include pause frames in the TX totals; back them out when the
 * corresponding MPS_STAT_CTL option is set (64 bytes per pause frame). */
6148 if (chip_id(adap) >= CHELSIO_T5) {
6149 if (stat_ctl & F_COUNTPAUSESTATTX) {
6150 p->tx_frames -= p->tx_pause;
6151 p->tx_octets -= p->tx_pause * 64;
6153 if (stat_ctl & F_COUNTPAUSEMCTX)
6154 p->tx_mcast_frames -= p->tx_pause;
6157 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6158 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6159 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6160 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6161 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6162 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6163 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6164 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6165 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6166 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6167 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6168 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6169 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6170 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6171 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6172 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6173 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6174 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6175 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6176 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6177 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6178 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6179 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6180 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6181 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6182 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6183 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame adjustment for the RX side. */
6185 if (chip_id(adap) >= CHELSIO_T5) {
6186 if (stat_ctl & F_COUNTPAUSESTATRX) {
6187 p->rx_frames -= p->rx_pause;
6188 p->rx_octets -= p->rx_pause * 64;
6190 if (stat_ctl & F_COUNTPAUSEMCRX)
6191 p->rx_mcast_frames -= p->rx_pause;
/* Drop/truncate counters are per buffer group; report only the BGs this
 * port owns (see bgmap) and zero the rest. */
6194 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6195 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6196 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6197 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6198 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6199 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6200 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6201 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6208 * t4_get_lb_stats - collect loopback port statistics
6209 * @adap: the adapter
6210 * @idx: the loopback port index
6211 * @p: the stats structure to fill
6213 * Return HW statistics for the given loopback port.
6215 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6217 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
/* Loopback stat registers also differ between T4 and T5+ maps. */
6219 #define GET_STAT(name) \
6220 t4_read_reg64(adap, \
6222 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6223 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6224 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6226 p->octets = GET_STAT(BYTES);
6227 p->frames = GET_STAT(FRAMES);
6228 p->bcast_frames = GET_STAT(BCAST);
6229 p->mcast_frames = GET_STAT(MCAST);
6230 p->ucast_frames = GET_STAT(UCAST);
6231 p->error_frames = GET_STAT(ERROR);
6233 p->frames_64 = GET_STAT(64B);
6234 p->frames_65_127 = GET_STAT(65B_127B);
6235 p->frames_128_255 = GET_STAT(128B_255B);
6236 p->frames_256_511 = GET_STAT(256B_511B);
6237 p->frames_512_1023 = GET_STAT(512B_1023B);
6238 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6239 p->frames_1519_max = GET_STAT(1519B_MAX);
6240 p->drop = GET_STAT(DROP_FRAMES);
/* Per-buffer-group drop/truncate counters, gated by port ownership. */
6242 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6243 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6244 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6245 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6246 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6247 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6248 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6249 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6256 * t4_wol_magic_enable - enable/disable magic packet WoL
6257 * @adap: the adapter
6258 * @port: the physical port index
6259 * @addr: MAC address expected in magic packets, %NULL to disable
6261 * Enables/disables magic packet wake-on-LAN for the selected port.
6263 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6266 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6269 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6270 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6271 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6273 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6274 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6275 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6279 t4_write_reg(adap, mag_id_reg_l,
6280 (addr[2] << 24) | (addr[3] << 16) |
6281 (addr[4] << 8) | addr[5]);
6282 t4_write_reg(adap, mag_id_reg_h,
6283 (addr[0] << 8) | addr[1]);
6285 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6286 V_MAGICEN(addr != NULL));
6290 * t4_wol_pat_enable - enable/disable pattern-based WoL
6291 * @adap: the adapter
6292 * @port: the physical port index
6293 * @map: bitmap of which HW pattern filters to set
6294 * @mask0: byte mask for bytes 0-63 of a packet
6295 * @mask1: byte mask for bytes 64-127 of a packet
6296 * @crc: Ethernet CRC for selected bytes
6297 * @enable: enable/disable switch
6299 * Sets the pattern filters indicated in @map to mask out the bytes
6300 * specified in @mask0/@mask1 in received packets and compare the CRC of
6301 * the resulting packet against @crc. If @enable is %true pattern-based
6302 * WoL is enabled, otherwise disabled.
6304 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6305 u64 mask0, u64 mask1, unsigned int crc, bool enable)
6311 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6313 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6316 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6322 #define EPIO_REG(name) \
6323 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6324 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6326 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6327 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6328 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6330 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6334 /* write byte masks */
6335 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6336 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6337 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6338 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6342 t4_write_reg(adap, EPIO_REG(DATA0), crc);
6343 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6344 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
6345 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6350 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6354 /* t4_mk_filtdelwr - create a delete filter WR
6355 * @ftid: the filter ID
6356 * @wr: the filter work request to populate
6357 * @qid: ingress queue to receive the delete notification
6359 * Creates a filter work request to delete the supplied filter. If @qid is
6360 * negative the delete notification is suppressed.
6362 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6364 memset(wr, 0, sizeof(*wr));
6365 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6366 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6367 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6368 V_FW_FILTER_WR_NOREPLY(qid < 0));
6369 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6371 wr->rx_chan_rx_rpl_iq =
6372 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * Initialize the common header of a firmware command structure @var for
 * command FW_<cmd>_CMD with direction <rd_wr> (READ or WRITE) and set its
 * length field from the structure's size.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6382 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6386 struct fw_ldst_cmd c;
6388 memset(&c, 0, sizeof(c));
6389 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6390 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6394 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6395 c.u.addrval.addr = cpu_to_be32(addr);
6396 c.u.addrval.val = cpu_to_be32(val);
6398 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6402 * t4_mdio_rd - read a PHY register through MDIO
6403 * @adap: the adapter
6404 * @mbox: mailbox to use for the FW command
6405 * @phy_addr: the PHY address
6406 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6407 * @reg: the register to read
6408 * @valp: where to store the value
6410 * Issues a FW command through the given mailbox to read a PHY register.
6412 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6413 unsigned int mmd, unsigned int reg, unsigned int *valp)
6417 struct fw_ldst_cmd c;
6419 memset(&c, 0, sizeof(c));
6420 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6421 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6422 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6424 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6425 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6426 V_FW_LDST_CMD_MMD(mmd));
6427 c.u.mdio.raddr = cpu_to_be16(reg);
6429 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6431 *valp = be16_to_cpu(c.u.mdio.rval);
6436 * t4_mdio_wr - write a PHY register through MDIO
6437 * @adap: the adapter
6438 * @mbox: mailbox to use for the FW command
6439 * @phy_addr: the PHY address
6440 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6441 * @reg: the register to write
6442 * @valp: value to write
6444 * Issues a FW command through the given mailbox to write a PHY register.
6446 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6447 unsigned int mmd, unsigned int reg, unsigned int val)
6450 struct fw_ldst_cmd c;
6452 memset(&c, 0, sizeof(c));
6453 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6454 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6455 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6457 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6458 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6459 V_FW_LDST_CMD_MMD(mmd));
6460 c.u.mdio.raddr = cpu_to_be16(reg);
6461 c.u.mdio.rval = cpu_to_be16(val);
6463 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6468 * t4_sge_decode_idma_state - decode the idma state
6469 * @adap: the adapter
6470 * @state: the state idma is stuck in
6472 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6474 static const char * const t4_decode[] = {
6476 "IDMA_PUSH_MORE_CPL_FIFO",
6477 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6479 "IDMA_PHYSADDR_SEND_PCIEHDR",
6480 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6481 "IDMA_PHYSADDR_SEND_PAYLOAD",
6482 "IDMA_SEND_FIFO_TO_IMSG",
6483 "IDMA_FL_REQ_DATA_FL_PREP",
6484 "IDMA_FL_REQ_DATA_FL",
6486 "IDMA_FL_H_REQ_HEADER_FL",
6487 "IDMA_FL_H_SEND_PCIEHDR",
6488 "IDMA_FL_H_PUSH_CPL_FIFO",
6489 "IDMA_FL_H_SEND_CPL",
6490 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6491 "IDMA_FL_H_SEND_IP_HDR",
6492 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6493 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6494 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6495 "IDMA_FL_D_SEND_PCIEHDR",
6496 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6497 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6498 "IDMA_FL_SEND_PCIEHDR",
6499 "IDMA_FL_PUSH_CPL_FIFO",
6501 "IDMA_FL_SEND_PAYLOAD_FIRST",
6502 "IDMA_FL_SEND_PAYLOAD",
6503 "IDMA_FL_REQ_NEXT_DATA_FL",
6504 "IDMA_FL_SEND_NEXT_PCIEHDR",
6505 "IDMA_FL_SEND_PADDING",
6506 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6507 "IDMA_FL_SEND_FIFO_TO_IMSG",
6508 "IDMA_FL_REQ_DATAFL_DONE",
6509 "IDMA_FL_REQ_HEADERFL_DONE",
6511 static const char * const t5_decode[] = {
6514 "IDMA_PUSH_MORE_CPL_FIFO",
6515 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6516 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6517 "IDMA_PHYSADDR_SEND_PCIEHDR",
6518 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6519 "IDMA_PHYSADDR_SEND_PAYLOAD",
6520 "IDMA_SEND_FIFO_TO_IMSG",
6521 "IDMA_FL_REQ_DATA_FL",
6523 "IDMA_FL_DROP_SEND_INC",
6524 "IDMA_FL_H_REQ_HEADER_FL",
6525 "IDMA_FL_H_SEND_PCIEHDR",
6526 "IDMA_FL_H_PUSH_CPL_FIFO",
6527 "IDMA_FL_H_SEND_CPL",
6528 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6529 "IDMA_FL_H_SEND_IP_HDR",
6530 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6531 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6532 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6533 "IDMA_FL_D_SEND_PCIEHDR",
6534 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6535 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6536 "IDMA_FL_SEND_PCIEHDR",
6537 "IDMA_FL_PUSH_CPL_FIFO",
6539 "IDMA_FL_SEND_PAYLOAD_FIRST",
6540 "IDMA_FL_SEND_PAYLOAD",
6541 "IDMA_FL_REQ_NEXT_DATA_FL",
6542 "IDMA_FL_SEND_NEXT_PCIEHDR",
6543 "IDMA_FL_SEND_PADDING",
6544 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6546 static const char * const t6_decode[] = {
6548 "IDMA_PUSH_MORE_CPL_FIFO",
6549 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6550 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6551 "IDMA_PHYSADDR_SEND_PCIEHDR",
6552 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6553 "IDMA_PHYSADDR_SEND_PAYLOAD",
6554 "IDMA_FL_REQ_DATA_FL",
6556 "IDMA_FL_DROP_SEND_INC",
6557 "IDMA_FL_H_REQ_HEADER_FL",
6558 "IDMA_FL_H_SEND_PCIEHDR",
6559 "IDMA_FL_H_PUSH_CPL_FIFO",
6560 "IDMA_FL_H_SEND_CPL",
6561 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6562 "IDMA_FL_H_SEND_IP_HDR",
6563 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6564 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6565 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6566 "IDMA_FL_D_SEND_PCIEHDR",
6567 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6568 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6569 "IDMA_FL_SEND_PCIEHDR",
6570 "IDMA_FL_PUSH_CPL_FIFO",
6572 "IDMA_FL_SEND_PAYLOAD_FIRST",
6573 "IDMA_FL_SEND_PAYLOAD",
6574 "IDMA_FL_REQ_NEXT_DATA_FL",
6575 "IDMA_FL_SEND_NEXT_PCIEHDR",
6576 "IDMA_FL_SEND_PADDING",
6577 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6579 static const u32 sge_regs[] = {
6580 A_SGE_DEBUG_DATA_LOW_INDEX_2,
6581 A_SGE_DEBUG_DATA_LOW_INDEX_3,
6582 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6584 const char * const *sge_idma_decode;
6585 int sge_idma_decode_nstates;
6587 unsigned int chip_version = chip_id(adapter);
6589 /* Select the right set of decode strings to dump depending on the
6590 * adapter chip type.
6592 switch (chip_version) {
6594 sge_idma_decode = (const char * const *)t4_decode;
6595 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6599 sge_idma_decode = (const char * const *)t5_decode;
6600 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6604 sge_idma_decode = (const char * const *)t6_decode;
6605 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6609 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
6613 if (state < sge_idma_decode_nstates)
6614 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6616 CH_WARN(adapter, "idma state %d unknown\n", state);
6618 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6619 CH_WARN(adapter, "SGE register %#x value %#x\n",
6620 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6624 * t4_sge_ctxt_flush - flush the SGE context cache
6625 * @adap: the adapter
6626 * @mbox: mailbox to use for the FW command
6628 * Issues a FW command through the given mailbox to flush the
6629 * SGE context cache.
6631 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6635 struct fw_ldst_cmd c;
6637 memset(&c, 0, sizeof(c));
6638 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6639 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6640 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6642 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6643 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6645 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6650 * t4_fw_hello - establish communication with FW
6651 * @adap: the adapter
6652 * @mbox: mailbox to use for the FW command
6653 * @evt_mbox: mailbox to receive async FW events
6654 * @master: specifies the caller's willingness to be the device master
6655 * @state: returns the current device state (if non-NULL)
6657 * Issues a command to establish communication with FW. Returns either
6658 * an error (negative integer) or the mailbox of the Master PF.
6660 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6661 enum dev_master master, enum dev_state *state)
6664 struct fw_hello_cmd c;
6666 unsigned int master_mbox;
6667 int retries = FW_CMD_HELLO_RETRIES;
6670 memset(&c, 0, sizeof(c));
6671 INIT_CMD(c, HELLO, WRITE);
6672 c.err_to_clearinit = cpu_to_be32(
6673 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6674 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6675 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6676 mbox : M_FW_HELLO_CMD_MBMASTER) |
6677 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6678 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6679 F_FW_HELLO_CMD_CLEARINIT);
6682 * Issue the HELLO command to the firmware. If it's not successful
6683 * but indicates that we got a "busy" or "timeout" condition, retry
6684 * the HELLO until we exhaust our retry limit. If we do exceed our
6685 * retry limit, check to see if the firmware left us any error
6686 * information and report that if so ...
6688 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6689 if (ret != FW_SUCCESS) {
6690 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6692 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6693 t4_report_fw_error(adap);
6697 v = be32_to_cpu(c.err_to_clearinit);
6698 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6700 if (v & F_FW_HELLO_CMD_ERR)
6701 *state = DEV_STATE_ERR;
6702 else if (v & F_FW_HELLO_CMD_INIT)
6703 *state = DEV_STATE_INIT;
6705 *state = DEV_STATE_UNINIT;
6709 * If we're not the Master PF then we need to wait around for the
6710 * Master PF Driver to finish setting up the adapter.
6712 * Note that we also do this wait if we're a non-Master-capable PF and
6713 * there is no current Master PF; a Master PF may show up momentarily
6714 * and we wouldn't want to fail pointlessly. (This can happen when an
6715 * OS loads lots of different drivers rapidly at the same time). In
6716 * this case, the Master PF returned by the firmware will be
6717 * M_PCIE_FW_MASTER so the test below will work ...
6719 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6720 master_mbox != mbox) {
6721 int waiting = FW_CMD_HELLO_TIMEOUT;
6724 * Wait for the firmware to either indicate an error or
6725 * initialized state. If we see either of these we bail out
6726 * and report the issue to the caller. If we exhaust the
6727 * "hello timeout" and we haven't exhausted our retries, try
6728 * again. Otherwise bail with a timeout error.
6737 * If neither Error nor Initialialized are indicated
6738 * by the firmware keep waiting till we exhaust our
6739 * timeout ... and then retry if we haven't exhausted
6742 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6743 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6754 * We either have an Error or Initialized condition
6755 * report errors preferentially.
6758 if (pcie_fw & F_PCIE_FW_ERR)
6759 *state = DEV_STATE_ERR;
6760 else if (pcie_fw & F_PCIE_FW_INIT)
6761 *state = DEV_STATE_INIT;
6765 * If we arrived before a Master PF was selected and
6766 * there's not a valid Master PF, grab its identity
6769 if (master_mbox == M_PCIE_FW_MASTER &&
6770 (pcie_fw & F_PCIE_FW_MASTER_VLD))
6771 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6780 * t4_fw_bye - end communication with FW
6781 * @adap: the adapter
6782 * @mbox: mailbox to use for the FW command
6784 * Issues a command to terminate communication with FW.
6786 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6788 struct fw_bye_cmd c;
6790 memset(&c, 0, sizeof(c));
6791 INIT_CMD(c, BYE, WRITE);
6792 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6796 * t4_fw_reset - issue a reset to FW
6797 * @adap: the adapter
6798 * @mbox: mailbox to use for the FW command
6799 * @reset: specifies the type of reset to perform
6801 * Issues a reset command of the specified type to FW.
6803 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6805 struct fw_reset_cmd c;
6807 memset(&c, 0, sizeof(c));
6808 INIT_CMD(c, RESET, WRITE);
6809 c.val = cpu_to_be32(reset);
6810 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6814 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6815 * @adap: the adapter
6816 * @mbox: mailbox to use for the FW RESET command (if desired)
6817 * @force: force uP into RESET even if FW RESET command fails
6819 * Issues a RESET command to firmware (if desired) with a HALT indication
6820 * and then puts the microprocessor into RESET state. The RESET command
6821 * will only be issued if a legitimate mailbox is provided (mbox <=
6822 * M_PCIE_FW_MASTER).
6824 * This is generally used in order for the host to safely manipulate the
6825 * adapter without fear of conflicting with whatever the firmware might
6826 * be doing. The only way out of this state is to RESTART the firmware
6829 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6834 * If a legitimate mailbox is provided, issue a RESET command
6835 * with a HALT indication.
6837 if (mbox <= M_PCIE_FW_MASTER) {
6838 struct fw_reset_cmd c;
6840 memset(&c, 0, sizeof(c));
6841 INIT_CMD(c, RESET, WRITE);
6842 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6843 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6844 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6848 * Normally we won't complete the operation if the firmware RESET
6849 * command fails but if our caller insists we'll go ahead and put the
6850 * uP into RESET. This can be useful if the firmware is hung or even
6851 * missing ... We'll have to take the risk of putting the uP into
6852 * RESET without the cooperation of firmware in that case.
6854 * We also force the firmware's HALT flag to be on in case we bypassed
6855 * the firmware RESET command above or we're dealing with old firmware
6856 * which doesn't have the HALT capability. This will serve as a flag
6857 * for the incoming firmware to know that it's coming out of a HALT
6858 * rather than a RESET ... if it's new enough to understand that ...
6860 if (ret == 0 || force) {
6861 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6862 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6867 * And we always return the result of the firmware RESET command
6868 * even when we force the uP into RESET ...
6874 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6875 * @adap: the adapter
6876 * @reset: if we want to do a RESET to restart things
6878 * Restart firmware previously halted by t4_fw_halt(). On successful
6879 * return the previous PF Master remains as the new PF Master and there
6880 * is no need to issue a new HELLO command, etc.
6882 * We do this in two ways:
6884 * 1. If we're dealing with newer firmware we'll simply want to take
6885 * the chip's microprocessor out of RESET. This will cause the
6886 * firmware to start up from its start vector. And then we'll loop
6887 * until the firmware indicates it's started again (PCIE_FW.HALT
6888 * reset to 0) or we timeout.
6890 * 2. If we're dealing with older firmware then we'll need to RESET
6891 * the chip since older firmware won't recognize the PCIE_FW.HALT
6892 * flag and automatically RESET itself on startup.
6894 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6898 * Since we're directing the RESET instead of the firmware
6899 * doing it automatically, we need to clear the PCIE_FW.HALT
6902 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6905 * If we've been given a valid mailbox, first try to get the
6906 * firmware to do the RESET. If that works, great and we can
6907 * return success. Otherwise, if we haven't been given a
6908 * valid mailbox or the RESET command failed, fall back to
6909 * hitting the chip with a hammer.
6911 if (mbox <= M_PCIE_FW_MASTER) {
6912 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6914 if (t4_fw_reset(adap, mbox,
6915 F_PIORST | F_PIORSTMODE) == 0)
6919 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6924 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6925 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6926 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6937 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6938 * @adap: the adapter
6939 * @mbox: mailbox to use for the FW RESET command (if desired)
6940 * @fw_data: the firmware image to write
6942 * @force: force upgrade even if firmware doesn't cooperate
6944 * Perform all of the steps necessary for upgrading an adapter's
6945 * firmware image. Normally this requires the cooperation of the
6946 * existing firmware in order to halt all existing activities
6947 * but if an invalid mailbox token is passed in we skip that step
6948 * (though we'll still put the adapter microprocessor into RESET in
6951 * On successful return the new firmware will have been loaded and
6952 * the adapter will have been fully RESET losing all previous setup
6953 * state. On unsuccessful return the adapter may be completely hosed ...
6954 * positive errno indicates that the adapter is ~probably~ intact, a
6955 * negative errno indicates that things are looking bad ...
6957 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6958 const u8 *fw_data, unsigned int size, int force)
6960 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6961 unsigned int bootstrap =
6962 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6965 if (!t4_fw_matches_chip(adap, fw_hdr))
6969 ret = t4_fw_halt(adap, mbox, force);
6970 if (ret < 0 && !force)
6974 ret = t4_load_fw(adap, fw_data, size);
6975 if (ret < 0 || bootstrap)
6979 * Older versions of the firmware don't understand the new
6980 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6981 * restart. So for newly loaded older firmware we'll have to do the
6982 * RESET for it so it starts up on a clean slate. We can tell if
6983 * the newly loaded firmware will handle this right by checking
6984 * its header flags to see if it advertises the capability.
6986 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6987 return t4_fw_restart(adap, mbox, reset);
6991 * Card doesn't have a firmware, install one.
6993 int t4_fw_forceinstall(struct adapter *adap, const u8 *fw_data,
6996 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6997 unsigned int bootstrap =
6998 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7001 if (!t4_fw_matches_chip(adap, fw_hdr) || bootstrap)
7004 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7005 t4_write_reg(adap, A_PCIE_FW, 0); /* Clobber internal state */
7006 ret = t4_load_fw(adap, fw_data, size);
7009 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7016 * t4_fw_initialize - ask FW to initialize the device
7017 * @adap: the adapter
7018 * @mbox: mailbox to use for the FW command
7020 * Issues a command to FW to partially initialize the device. This
7021 * performs initialization that generally doesn't depend on user input.
7023 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7025 struct fw_initialize_cmd c;
7027 memset(&c, 0, sizeof(c));
7028 INIT_CMD(c, INITIALIZE, WRITE);
7029 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7033 * t4_query_params_rw - query FW or device parameters
7034 * @adap: the adapter
7035 * @mbox: mailbox to use for the FW command
7038 * @nparams: the number of parameters
7039 * @params: the parameter names
7040 * @val: the parameter values
7041 * @rw: Write and read flag
7043 * Reads the value of FW or device parameters. Up to 7 parameters can be
7046 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7047 unsigned int vf, unsigned int nparams, const u32 *params,
7051 struct fw_params_cmd c;
7052 __be32 *p = &c.param[0].mnem;
7057 memset(&c, 0, sizeof(c));
7058 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7059 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7060 V_FW_PARAMS_CMD_PFN(pf) |
7061 V_FW_PARAMS_CMD_VFN(vf));
7062 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7064 for (i = 0; i < nparams; i++) {
7065 *p++ = cpu_to_be32(*params++);
7067 *p = cpu_to_be32(*(val + i));
7071 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7073 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7074 *val++ = be32_to_cpu(*p);
7078 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7079 unsigned int vf, unsigned int nparams, const u32 *params,
7082 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
7086 * t4_set_params_timeout - sets FW or device parameters
7087 * @adap: the adapter
7088 * @mbox: mailbox to use for the FW command
7091 * @nparams: the number of parameters
7092 * @params: the parameter names
7093 * @val: the parameter values
7094 * @timeout: the timeout time
7096 * Sets the value of FW or device parameters. Up to 7 parameters can be
7097 * specified at once.
7099 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7100 unsigned int pf, unsigned int vf,
7101 unsigned int nparams, const u32 *params,
7102 const u32 *val, int timeout)
7104 struct fw_params_cmd c;
7105 __be32 *p = &c.param[0].mnem;
7110 memset(&c, 0, sizeof(c));
7111 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7112 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7113 V_FW_PARAMS_CMD_PFN(pf) |
7114 V_FW_PARAMS_CMD_VFN(vf));
7115 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7118 *p++ = cpu_to_be32(*params++);
7119 *p++ = cpu_to_be32(*val++);
7122 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7126 * t4_set_params - sets FW or device parameters
7127 * @adap: the adapter
7128 * @mbox: mailbox to use for the FW command
7131 * @nparams: the number of parameters
7132 * @params: the parameter names
7133 * @val: the parameter values
7135 * Sets the value of FW or device parameters. Up to 7 parameters can be
7136 * specified at once.
7138 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7139 unsigned int vf, unsigned int nparams, const u32 *params,
7142 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7143 FW_CMD_MAX_TIMEOUT);
7147 * t4_cfg_pfvf - configure PF/VF resource limits
7148 * @adap: the adapter
7149 * @mbox: mailbox to use for the FW command
7150 * @pf: the PF being configured
7151 * @vf: the VF being configured
7152 * @txq: the max number of egress queues
7153 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7154 * @rxqi: the max number of interrupt-capable ingress queues
7155 * @rxq: the max number of interruptless ingress queues
7156 * @tc: the PCI traffic class
7157 * @vi: the max number of virtual interfaces
7158 * @cmask: the channel access rights mask for the PF/VF
7159 * @pmask: the port access rights mask for the PF/VF
7160 * @nexact: the maximum number of exact MPS filters
7161 * @rcaps: read capabilities
7162 * @wxcaps: write/execute capabilities
7164 * Configures resource limits and capabilities for a physical or virtual
7167 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7168 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7169 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7170 unsigned int vi, unsigned int cmask, unsigned int pmask,
7171 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7173 struct fw_pfvf_cmd c;
7175 memset(&c, 0, sizeof(c));
7176 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7177 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7178 V_FW_PFVF_CMD_VFN(vf));
7179 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7180 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7181 V_FW_PFVF_CMD_NIQ(rxq));
7182 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7183 V_FW_PFVF_CMD_PMASK(pmask) |
7184 V_FW_PFVF_CMD_NEQ(txq));
7185 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7186 V_FW_PFVF_CMD_NVI(vi) |
7187 V_FW_PFVF_CMD_NEXACTF(nexact));
7188 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7189 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7190 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7191 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7195 * t4_alloc_vi_func - allocate a virtual interface
7196 * @adap: the adapter
7197 * @mbox: mailbox to use for the FW command
7198 * @port: physical port associated with the VI
7199 * @pf: the PF owning the VI
7200 * @vf: the VF owning the VI
7201 * @nmac: number of MAC addresses needed (1 to 5)
7202 * @mac: the MAC addresses of the VI
7203 * @rss_size: size of RSS table slice associated with this VI
7204 * @portfunc: which Port Application Function MAC Address is desired
7205 * @idstype: Intrusion Detection Type
7207 * Allocates a virtual interface for the given physical port. If @mac is
7208 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7209 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7210 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7211 * stored consecutively so the space needed is @nmac * 6 bytes.
7212 * Returns a negative error number or the non-negative VI id.
7214 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7215 unsigned int port, unsigned int pf, unsigned int vf,
7216 unsigned int nmac, u8 *mac, u16 *rss_size,
7217 unsigned int portfunc, unsigned int idstype)
7222 memset(&c, 0, sizeof(c));
7223 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7224 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7225 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7226 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7227 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7228 V_FW_VI_CMD_FUNC(portfunc));
7229 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7232 c.norss_rsssize = F_FW_VI_CMD_NORSS;
7234 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7239 memcpy(mac, c.mac, sizeof(c.mac));
7242 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7244 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7246 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7248 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7252 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7253 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7257 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7258 * @adap: the adapter
7259 * @mbox: mailbox to use for the FW command
7260 * @port: physical port associated with the VI
7261 * @pf: the PF owning the VI
7262 * @vf: the VF owning the VI
7263 * @nmac: number of MAC addresses needed (1 to 5)
7264 * @mac: the MAC addresses of the VI
7265 * @rss_size: size of RSS table slice associated with this VI
7267 * backwards compatible and convieniance routine to allocate a Virtual
7268 * Interface with a Ethernet Port Application Function and Intrustion
7269 * Detection System disabled.
7271 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7272 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7275 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7280 * t4_free_vi - free a virtual interface
7281 * @adap: the adapter
7282 * @mbox: mailbox to use for the FW command
7283 * @pf: the PF owning the VI
7284 * @vf: the VF owning the VI
7285 * @viid: virtual interface identifier
7287 * Free a previously allocated virtual interface.
7289 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7290 unsigned int vf, unsigned int viid)
7294 memset(&c, 0, sizeof(c));
7295 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7298 V_FW_VI_CMD_PFN(pf) |
7299 V_FW_VI_CMD_VFN(vf));
/* F_FW_VI_CMD_FREE asks the firmware to tear down the VI named by viid. */
7300 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7301 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7303 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7307 * t4_set_rxmode - set Rx properties of a virtual interface
7308 * @adap: the adapter
7309 * @mbox: mailbox to use for the FW command
7311 * @mtu: the new MTU or -1
7312 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7313 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7314 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7315 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7316 * @sleep_ok: if true we may sleep while awaiting command completion
7318 * Sets Rx properties of a virtual interface.
7320 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7321 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7324 struct fw_vi_rxmode_cmd c;
7326 /* convert to FW values */
/*
 * Each assignment below replaces a negative "-1 = no change" argument
 * with the field's all-ones mask, which the firmware treats as "leave
 * unchanged".  NOTE(review): the guarding "if (arg < 0)" tests are
 * elided in this excerpt — confirm against the full source.
 */
7328 mtu = M_FW_VI_RXMODE_CMD_MTU;
7330 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7332 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7334 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7336 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7338 memset(&c, 0, sizeof(c));
7339 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7340 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7341 V_FW_VI_RXMODE_CMD_VIID(viid));
7342 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7344 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7345 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7346 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7347 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7348 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7349 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7353 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7354 * @adap: the adapter
7355 * @mbox: mailbox to use for the FW command
7357 * @free: if true any existing filters for this VI id are first removed
7358 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7359 * @addr: the MAC address(es)
7360 * @idx: where to store the index of each allocated filter
7361 * @hash: pointer to hash address filter bitmap
7362 * @sleep_ok: call is allowed to sleep
7364 * Allocates an exact-match filter for each of the supplied addresses and
7365 * sets it to the corresponding address. If @idx is not %NULL it should
7366 * have at least @naddr entries, each of which will be set to the index of
7367 * the filter allocated for the corresponding MAC address. If a filter
7368 * could not be allocated for an address its index is set to 0xffff.
7369 * If @hash is not %NULL addresses that fail to allocate an exact filter
7370 * are hashed and update the hash filter bitmap pointed at by @hash.
7372 * Returns a negative error number or the number of filters allocated.
7374 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7375 unsigned int viid, bool free, unsigned int naddr,
7376 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7378 int offset, ret = 0;
7379 struct fw_vi_mac_cmd c;
7380 unsigned int nfilters = 0;
7381 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7382 unsigned int rem = naddr;
7384 if (naddr > max_naddr)
/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact) per mailbox command. */
7387 for (offset = 0; offset < naddr ; /**/) {
7388 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7390 : ARRAY_SIZE(c.u.exact));
7391 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7392 u.exact[fw_naddr]), 16);
7393 struct fw_vi_mac_exact *p;
7396 memset(&c, 0, sizeof(c));
7397 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7400 V_FW_CMD_EXEC(free) |
7401 V_FW_VI_MAC_CMD_VIID(viid));
7402 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7403 V_FW_CMD_LEN16(len16))_;
7405 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7407 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7408 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7409 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7413 * It's okay if we run out of space in our MAC address arena.
7414 * Some of the addresses we submit may get stored so we need
7415 * to run through the reply to see what the results were ...
7417 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7418 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: per-address IDX >= max_naddr means "no exact filter was allocated". */
7421 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7422 u16 index = G_FW_VI_MAC_CMD_IDX(
7423 be16_to_cpu(p->valid_to_idx));
7426 idx[offset+i] = (index >= max_naddr
7429 if (index < max_naddr)
/* Exact filter unavailable: fall back to the inexact-match hash bitmap. */
7432 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* -FW_ENOMEM is a partial success: report how many filters actually landed. */
7440 if (ret == 0 || ret == -FW_ENOMEM)
7446 * t4_change_mac - modifies the exact-match filter for a MAC address
7447 * @adap: the adapter
7448 * @mbox: mailbox to use for the FW command
7450 * @idx: index of existing filter for old value of MAC address, or -1
7451 * @addr: the new MAC address value
7452 * @persist: whether a new MAC allocation should be persistent
7453 * @add_smt: if true also add the address to the HW SMT
7455 * Modifies an exact-match filter and sets it to the new MAC address if
7456 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
7457 * latter case the address is added persistently if @persist is %true.
7459 * Note that in general it is not possible to modify the value of a given
7460 * filter so the generic way to modify an address filter is to free the one
7461 * being used by the old address value and allocate a new filter for the
7462 * new address value.
7464 * Returns a negative error number or the index of the filter with the new
7465 * MAC value. Note that this index may differ from @idx.
7467 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7468 int idx, const u8 *addr, bool persist, bool add_smt)
7471 struct fw_vi_mac_cmd c;
7472 struct fw_vi_mac_exact *p = c.u.exact;
7473 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7475 if (idx < 0) /* new allocation */
7476 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7477 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7479 memset(&c, 0, sizeof(c));
7480 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7481 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7482 V_FW_VI_MAC_CMD_VIID(viid));
7483 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7484 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7485 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7486 V_FW_VI_MAC_CMD_IDX(idx));
7487 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7489 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success the firmware reports the index it actually used, which may differ from @idx. */
7491 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
/* An out-of-range index in the reply indicates allocation failure (handling elided here). */
7492 if (ret >= max_mac_addr)
7499 * t4_set_addr_hash - program the MAC inexact-match hash filter
7500 * @adap: the adapter
7501 * @mbox: mailbox to use for the FW command
7503 * @ucast: whether the hash filter should also match unicast addresses
7504 * @vec: the value to be written to the hash filter
7505 * @sleep_ok: call is allowed to sleep
7507 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7509 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7510 bool ucast, u64 vec, bool sleep_ok)
7512 struct fw_vi_mac_cmd c;
7515 memset(&c, 0, sizeof(c));
7516 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7517 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
/*
 * NOTE(review): this is a FW_VI_MAC_CMD but the VIID is packed with the
 * FW_VI_ENABLE_CMD macro; presumably both commands place VIID in the same
 * bit positions — verify against the firmware interface header.
 */
7518 V_FW_VI_ENABLE_CMD_VIID(viid));
7519 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7520 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7521 c.freemacs_to_len16 = cpu_to_be32(val);
7522 c.u.hash.hashvec = cpu_to_be64(vec);
7523 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7527 * t4_enable_vi_params - enable/disable a virtual interface
7528 * @adap: the adapter
7529 * @mbox: mailbox to use for the FW command
7531 * @rx_en: 1=enable Rx, 0=disable Rx
7532 * @tx_en: 1=enable Tx, 0=disable Tx
7533 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7535 * Enables/disables a virtual interface. Note that setting DCB Enable
7536 * only makes sense when enabling a Virtual Interface ...
7538 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7539 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7541 struct fw_vi_enable_cmd c;
7543 memset(&c, 0, sizeof(c));
7544 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7545 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7546 V_FW_VI_ENABLE_CMD_VIID(viid));
7547 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7548 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7549 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
/* Non-sleeping mailbox variant: this path must be callable from contexts that cannot sleep. */
7551 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7555 * t4_enable_vi - enable/disable a virtual interface
7556 * @adap: the adapter
7557 * @mbox: mailbox to use for the FW command
7559 * @rx_en: 1=enable Rx, 0=disable Rx
7560 * @tx_en: 1=enable Tx, 0=disable Tx
7562 * Enables/disables a virtual interface. Convenience wrapper around
7563 * t4_enable_vi_params() with DCB message delivery disabled.
7565 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7566 bool rx_en, bool tx_en)
7568 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7572 * t4_identify_port - identify a VI's port by blinking its LED
7573 * @adap: the adapter
7574 * @mbox: mailbox to use for the FW command
7576 * @nblinks: how many times to blink LED at 2.5 Hz
7578 * Identifies a VI's port by blinking its LED.
7580 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7581 unsigned int nblinks)
7583 struct fw_vi_enable_cmd c;
7585 memset(&c, 0, sizeof(c));
7586 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7587 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7588 V_FW_VI_ENABLE_CMD_VIID(viid));
/* F_FW_VI_ENABLE_CMD_LED selects the LED-blink function of the enable command. */
7589 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7590 c.blinkdur = cpu_to_be16(nblinks);
7591 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7595 * t4_iq_stop - stop an ingress queue and its FLs
7596 * @adap: the adapter
7597 * @mbox: mailbox to use for the FW command
7598 * @pf: the PF owning the queues
7599 * @vf: the VF owning the queues
7600 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7601 * @iqid: ingress queue id
7602 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7603 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7605 * Stops an ingress queue and its associated FLs, if any. This causes
7606 * any current or future data/messages destined for these queues to be
7609 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7610 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7611 unsigned int fl0id, unsigned int fl1id)
7615 memset(&c, 0, sizeof(c));
7616 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7617 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7618 V_FW_IQ_CMD_VFN(vf));
/* IQSTOP stops delivery but leaves the queue allocated; contrast with t4_iq_free(). */
7619 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7620 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7621 c.iqid = cpu_to_be16(iqid);
7622 c.fl0id = cpu_to_be16(fl0id);
7623 c.fl1id = cpu_to_be16(fl1id);
7624 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7628 * t4_iq_free - free an ingress queue and its FLs
7629 * @adap: the adapter
7630 * @mbox: mailbox to use for the FW command
7631 * @pf: the PF owning the queues
7632 * @vf: the VF owning the queues
7633 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7634 * @iqid: ingress queue id
7635 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7636 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7638 * Frees an ingress queue and its associated FLs, if any.
7640 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7641 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7642 unsigned int fl0id, unsigned int fl1id)
7646 memset(&c, 0, sizeof(c));
7647 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7648 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7649 V_FW_IQ_CMD_VFN(vf));
/* Same command layout as t4_iq_stop() but with FREE set to release the queue resources. */
7650 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7651 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7652 c.iqid = cpu_to_be16(iqid);
7653 c.fl0id = cpu_to_be16(fl0id);
7654 c.fl1id = cpu_to_be16(fl1id);
7655 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7659 * t4_eth_eq_free - free an Ethernet egress queue
7660 * @adap: the adapter
7661 * @mbox: mailbox to use for the FW command
7662 * @pf: the PF owning the queue
7663 * @vf: the VF owning the queue
7664 * @eqid: egress queue id
7666 * Frees an Ethernet egress queue.
7668 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7669 unsigned int vf, unsigned int eqid)
7671 struct fw_eq_eth_cmd c;
7673 memset(&c, 0, sizeof(c));
7674 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7675 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7676 V_FW_EQ_ETH_CMD_PFN(pf) |
7677 V_FW_EQ_ETH_CMD_VFN(vf));
7678 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7679 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7680 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7684 * t4_ctrl_eq_free - free a control egress queue
7685 * @adap: the adapter
7686 * @mbox: mailbox to use for the FW command
7687 * @pf: the PF owning the queue
7688 * @vf: the VF owning the queue
7689 * @eqid: egress queue id
7691 * Frees a control egress queue.
7693 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7694 unsigned int vf, unsigned int eqid)
7696 struct fw_eq_ctrl_cmd c;
7698 memset(&c, 0, sizeof(c));
7699 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7700 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7701 V_FW_EQ_CTRL_CMD_PFN(pf) |
7702 V_FW_EQ_CTRL_CMD_VFN(vf));
7703 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7704 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7705 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7709 * t4_ofld_eq_free - free an offload egress queue
7710 * @adap: the adapter
7711 * @mbox: mailbox to use for the FW command
7712 * @pf: the PF owning the queue
7713 * @vf: the VF owning the queue
7714 * @eqid: egress queue id
7716 * Frees an offload egress queue.
7718 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7719 unsigned int vf, unsigned int eqid)
7721 struct fw_eq_ofld_cmd c;
7723 memset(&c, 0, sizeof(c));
7724 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7725 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7726 V_FW_EQ_OFLD_CMD_PFN(pf) |
7727 V_FW_EQ_OFLD_CMD_VFN(vf));
7728 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7729 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7730 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7734 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7735 * @link_down_rc: Link Down Reason Code
7737 * Returns a string representation of the Link Down Reason Code.
7739 const char *t4_link_down_rc_str(unsigned char link_down_rc)
/* Table index must match the firmware's reason-code numbering (some entries elided here). */
7741 static const char *reason[] = {
7744 "Auto-negotiation Failure",
7746 "Insufficient Airflow",
7747 "Unable To Determine Reason",
7748 "No RX Signal Detected",
/* Out-of-range codes get a fixed fallback string rather than indexing past the table. */
7752 if (link_down_rc >= ARRAY_SIZE(reason))
7753 return "Bad Reason Code";
7755 return reason[link_down_rc];
7759 * Return the highest speed set in the port capabilities, in Mb/s.
7761 unsigned int fwcap_to_speed(uint32_t caps)
/* Helper macro: return __speed (Mb/s) if the corresponding capability bit is set. */
7763 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
7765 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
/* Tested in descending order so the first hit is the highest supported speed. */
7769 TEST_SPEED_RETURN(400G, 400000);
7770 TEST_SPEED_RETURN(200G, 200000);
7771 TEST_SPEED_RETURN(100G, 100000);
7772 TEST_SPEED_RETURN(50G, 50000);
7773 TEST_SPEED_RETURN(40G, 40000);
7774 TEST_SPEED_RETURN(25G, 25000);
7775 TEST_SPEED_RETURN(10G, 10000);
7776 TEST_SPEED_RETURN(1G, 1000);
7777 TEST_SPEED_RETURN(100M, 100);
7779 #undef TEST_SPEED_RETURN
7785 * Return the port capabilities bit for the given speed, which is in Mb/s.
7787 uint32_t speed_to_fwcap(unsigned int speed)
/* Helper macro: map an exact Mb/s value onto its FW_PORT_CAP32_SPEED_* bit. */
7789 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
7791 if (speed == __speed) \
7792 return FW_PORT_CAP32_SPEED_##__caps_speed; \
7795 TEST_SPEED_RETURN(400G, 400000);
7796 TEST_SPEED_RETURN(200G, 200000);
7797 TEST_SPEED_RETURN(100G, 100000);
7798 TEST_SPEED_RETURN(50G, 50000);
7799 TEST_SPEED_RETURN(40G, 40000);
7800 TEST_SPEED_RETURN(25G, 25000);
7801 TEST_SPEED_RETURN(10G, 10000);
7802 TEST_SPEED_RETURN(1G, 1000);
7803 TEST_SPEED_RETURN(100M, 100);
7805 #undef TEST_SPEED_RETURN
7811 * Return the port capabilities bit for the highest speed in the capabilities.
7813 uint32_t fwcap_top_speed(uint32_t caps)
/* Helper macro: return the capability bit itself (not Mb/s) if it is set in caps. */
7815 #define TEST_SPEED_RETURN(__caps_speed) \
7817 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7818 return FW_PORT_CAP32_SPEED_##__caps_speed; \
/* Descending order: first match is the top speed. */
7821 TEST_SPEED_RETURN(400G);
7822 TEST_SPEED_RETURN(200G);
7823 TEST_SPEED_RETURN(100G);
7824 TEST_SPEED_RETURN(50G);
7825 TEST_SPEED_RETURN(40G);
7826 TEST_SPEED_RETURN(25G);
7827 TEST_SPEED_RETURN(10G);
7828 TEST_SPEED_RETURN(1G);
7829 TEST_SPEED_RETURN(100M);
7831 #undef TEST_SPEED_RETURN
7838 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
7839 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
7841 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
7842 * 32-bit Port Capabilities value.
7844 static uint32_t lstatus_to_fwcap(u32 lstatus)
7846 uint32_t linkattr = 0;
7849 * Unfortunately the format of the Link Status in the old
7850 * 16-bit Port Information message isn't the same as the
7851 * 16-bit Port Capabilities bitfield used everywhere else ...
7853 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
7854 linkattr |= FW_PORT_CAP32_FC_RX;
7855 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
7856 linkattr |= FW_PORT_CAP32_FC_TX;
/* Each old LSPEED bit is re-expressed as its 32-bit capability counterpart. */
7857 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7858 linkattr |= FW_PORT_CAP32_SPEED_100M;
7859 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7860 linkattr |= FW_PORT_CAP32_SPEED_1G;
7861 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7862 linkattr |= FW_PORT_CAP32_SPEED_10G;
7863 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7864 linkattr |= FW_PORT_CAP32_SPEED_25G;
7865 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7866 linkattr |= FW_PORT_CAP32_SPEED_40G;
7867 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
7868 linkattr |= FW_PORT_CAP32_SPEED_100G;
7874 * Updates all fields owned by the common code in port_info and link_config
7875 * based on information provided by the firmware. Does not touch any
7876 * requested_* field.
7878 static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
7879 enum fw_port_action action, bool *mod_changed, bool *link_changed)
7881 struct link_config old_lc, *lc = &pi->link_cfg;
7882 unsigned char fc, fec;
7884 int old_ptype, old_mtype;
/* Snapshot the old port/module type so change detection below can compare. */
7886 old_ptype = pi->port_type;
7887 old_mtype = pi->mod_type;
/* Old 16-bit GET_PORT_INFO reply: capabilities arrive 16-bit wide and are widened. */
7889 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
7890 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7892 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
7893 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
7894 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
7895 G_FW_PORT_CMD_MDIOADDR(stat) : -1;
7897 lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
7898 lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
7899 lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
7900 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7901 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
7903 linkattr = lstatus_to_fwcap(stat);
/* New 32-bit GET_PORT_INFO32 reply: capabilities are already 32-bit. */
7904 } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
7905 stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
7907 pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
7908 pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
7909 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
7910 G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
7912 lc->supported = be32_to_cpu(p->u.info32.pcaps32);
7913 lc->advertising = be32_to_cpu(p->u.info32.acaps32);
/*
 * NOTE(review): lpacaps32 is a 32-bit field (sibling pcaps32/acaps32 use
 * be32_to_cpu) but is converted here with be16_to_cpu — looks like it
 * truncates the link partner's capabilities.  Verify against upstream.
 */
7914 lc->lp_advertising = be16_to_cpu(p->u.info32.lpacaps32);
7915 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
7916 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
7918 linkattr = be32_to_cpu(p->u.info32.linkattr32);
7920 CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
7924 lc->speed = fwcap_to_speed(linkattr);
7927 if (linkattr & FW_PORT_CAP32_FC_RX)
7929 if (linkattr & FW_PORT_CAP32_FC_TX)
7934 if (linkattr & FW_PORT_CAP32_FEC_RS)
7936 if (linkattr & FW_PORT_CAP32_FEC_BASER_RS)
7937 fec |= FEC_BASER_RS;
/* Report what changed (module vs. link) to the caller via the optional out-params. */
7940 if (mod_changed != NULL)
7941 *mod_changed = false;
7942 if (link_changed != NULL)
7943 *link_changed = false;
7944 if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
7945 old_lc.supported != lc->supported) {
7946 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
7947 lc->fec_hint = lc->advertising &
7948 V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
7950 if (mod_changed != NULL)
7951 *mod_changed = true;
7953 if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
7954 old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
7955 if (link_changed != NULL)
7956 *link_changed = true;
7961 * t4_update_port_info - retrieve and update port information if changed
7962 * @pi: the port_info
7964 * We issue a Get Port Information Command to the Firmware and, if
7965 * successful, we check to see if anything is different from what we
7966 * last recorded and update things accordingly.
7968 int t4_update_port_info(struct port_info *pi)
7970 struct adapter *sc = pi->adapter;
7971 struct fw_port_cmd cmd;
7972 enum fw_port_action action;
7975 memset(&cmd, 0, sizeof(cmd));
7976 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
7977 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7978 V_FW_PORT_CMD_PORTID(pi->tx_chan));
/* Prefer the 32-bit port-info query when the firmware advertises 32-bit port caps. */
7979 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
7980 FW_PORT_ACTION_GET_PORT_INFO;
7981 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
7983 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
/* Callers here don't need the changed flags, hence the NULL out-params. */
7987 handle_port_info(pi, &cmd, action, NULL, NULL);
7992 * t4_handle_fw_rpl - process a FW reply message
7993 * @adap: the adapter
7994 * @rpl: start of the FW message
7996 * Processes a FW message, such as link state change messages.
7998 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8000 u8 opcode = *(const u8 *)rpl;
8001 const struct fw_port_cmd *p = (const void *)rpl;
8002 enum fw_port_action action =
8003 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8004 bool mod_changed, link_changed;
8006 if (opcode == FW_PORT_CMD &&
8007 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8008 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8009 /* link/module state change message */
8011 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8012 struct port_info *pi = NULL;
8013 struct link_config *lc;
/* Map the reported channel back to the owning port_info. */
8015 for_each_port(adap, i) {
8016 pi = adap2pinfo(adap, i);
8017 if (pi->tx_chan == chan)
8023 handle_port_info(pi, p, action, &mod_changed, &link_changed);
/* Propagate module and link changes to the OS glue layer. */
8026 t4_os_portmod_changed(pi);
8029 t4_os_link_changed(pi);
8033 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8040 * get_pci_mode - determine a card's PCI mode
8041 * @adapter: the adapter
8042 * @p: where to store the PCI settings
8044 * Determines a card's PCI mode and associated parameters, such as speed
8047 static void get_pci_mode(struct adapter *adapter,
8048 struct pci_params *p)
8053 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Read the PCIe Link Status register: current link speed and negotiated width. */
8055 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8056 p->speed = val & PCI_EXP_LNKSTA_CLS;
8057 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8062 u32 vendor_and_model_id;
8066 int t4_get_flash_params(struct adapter *adapter)
8069 * Table for non-standard supported Flash parts. Note, all Flash
8070 * parts must have 64KB sectors.
8072 static struct flash_desc supported_flash[] = {
8073 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8078 unsigned int part, manufacturer;
8079 unsigned int density, size = 0;
8083 * Issue a Read ID Command to the Flash part. We decode supported
8084 * Flash parts and their sizes from this. There's a newer Query
8085 * Command which can retrieve detailed geometry information but many
8086 * Flash parts don't support it.
8088 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
8090 ret = sf1_read(adapter, 3, 0, 1, &flashid);
8091 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
8096 * Check to see if it's one of our non-standard supported Flash parts.
8098 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8099 if (supported_flash[part].vendor_and_model_id == flashid) {
8100 adapter->params.sf_size =
8101 supported_flash[part].size_mb;
8102 adapter->params.sf_nsec =
8103 adapter->params.sf_size / SF_SEC_SIZE;
8108 * Decode Flash part size. The code below looks repetitive with
8109 * common encodings, but that's not guaranteed in the JEDEC
8110 * specification for the Read JEDEC ID command. The only thing that
8111 * we're guaranteed by the JEDEC specification is where the
8112 * Manufacturer ID is in the returned result. After that each
8113 * Manufacturer ~could~ encode things completely differently.
8114 * Note, all Flash parts must have 64KB sectors.
8116 manufacturer = flashid & 0xff;
8117 switch (manufacturer) {
8118 case 0x20: /* Micron/Numonix */
8120 * This Density -> Size decoding table is taken from Micron
8123 density = (flashid >> 16) & 0xff;
8125 case 0x14: size = 1 << 20; break; /* 1MB */
8126 case 0x15: size = 1 << 21; break; /* 2MB */
8127 case 0x16: size = 1 << 22; break; /* 4MB */
8128 case 0x17: size = 1 << 23; break; /* 8MB */
8129 case 0x18: size = 1 << 24; break; /* 16MB */
8130 case 0x19: size = 1 << 25; break; /* 32MB */
8131 case 0x20: size = 1 << 26; break; /* 64MB */
8132 case 0x21: size = 1 << 27; break; /* 128MB */
8133 case 0x22: size = 1 << 28; break; /* 256MB */
8137 case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
8139 * This Density -> Size decoding table is taken from ISSI
8142 density = (flashid >> 16) & 0xff;
8144 case 0x16: size = 1 << 25; break; /* 32MB */
8145 case 0x17: size = 1 << 26; break; /* 64MB */
8149 case 0xc2: /* Macronix */
8151 * This Density -> Size decoding table is taken from Macronix
8154 density = (flashid >> 16) & 0xff;
8156 case 0x17: size = 1 << 23; break; /* 8MB */
8157 case 0x18: size = 1 << 24; break; /* 16MB */
8161 case 0xef: /* Winbond */
8163 * This Density -> Size decoding table is taken from Winbond
8166 density = (flashid >> 16) & 0xff;
8168 case 0x17: size = 1 << 23; break; /* 8MB */
8169 case 0x18: size = 1 << 24; break; /* 16MB */
8174 /* If we didn't recognize the FLASH part, that's no real issue: the
8175 * Hardware/Software contract says that Hardware will _*ALWAYS*_
8176 * use a FLASH part which is at least 4MB in size and has 64KB
8177 * sectors. The unrecognized FLASH part is likely to be much larger
8178 * than 4MB, but that's all we really need.
8181 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
8186 * Store decoded Flash size and fall through into vetting code.
8188 adapter->params.sf_size = size;
8189 adapter->params.sf_nsec = size / SF_SEC_SIZE;
8193 * We should ~probably~ reject adapters with FLASHes which are too
8194 * small but we have some legacy FPGAs with small FLASHes that we'd
8195 * still like to use. So instead we emit a scary message ...
8197 if (adapter->params.sf_size < FLASH_MIN_SIZE)
8198 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8199 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
/* Program the PCIe Device Control 2 Completion Timeout field (read-modify-write). */
8204 static void set_pcie_completion_timeout(struct adapter *adapter,
8210 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8212 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8215 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
/*
 * Return the static per-generation parameter table entry for @chipid,
 * or NULL if the chip id is outside the supported T4..T6 range.
 */
8219 const struct chip_params *t4_get_chip_params(int chipid)
8221 static const struct chip_params chip_params[] = {
/* T4 */
8225 .pm_stats_cnt = PM_NSTATS,
8226 .cng_ch_bits_log = 2,
8228 .cim_num_obq = CIM_NUM_OBQ,
8229 .mps_rplc_size = 128,
8231 .sge_fl_db = F_DBPRIO,
8232 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
/* T5 */
8237 .pm_stats_cnt = PM_NSTATS,
8238 .cng_ch_bits_log = 2,
8240 .cim_num_obq = CIM_NUM_OBQ_T5,
8241 .mps_rplc_size = 128,
8243 .sge_fl_db = F_DBPRIO | F_DBTYPE,
8244 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* T6 */
8249 .pm_stats_cnt = T6_PM_NSTATS,
8250 .cng_ch_bits_log = 3,
8252 .cim_num_obq = CIM_NUM_OBQ_T5,
8253 .mps_rplc_size = 256,
8256 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
/* Re-base so CHELSIO_T4 maps to table index 0. */
8260 chipid -= CHELSIO_T4;
8261 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
8264 return &chip_params[chipid];
8268 * t4_prep_adapter - prepare SW and HW for operation
8269 * @adapter: the adapter
8270 * @buf: temporary space of at least VPD_LEN size provided by the caller.
8272 * Initialize adapter SW state for the various HW modules, set initial
8273 * values for some adapter tunables, take PHYs out of reset, and
8274 * initialize the MDIO interface.
8276 int t4_prep_adapter(struct adapter *adapter, u32 *buf)
8282 get_pci_mode(adapter, &adapter->params.pci);
8284 pl_rev = t4_read_reg(adapter, A_PL_REV);
8285 adapter->params.chipid = G_CHIPID(pl_rev);
8286 adapter->params.rev = G_REV(pl_rev);
8287 if (adapter->params.chipid == 0) {
8288 /* T4 did not have chipid in PL_REV (T5 onwards do) */
8289 adapter->params.chipid = CHELSIO_T4;
8291 /* T4A1 chip is not supported */
8292 if (adapter->params.rev == 1) {
8293 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
8298 adapter->chip_params = t4_get_chip_params(chip_id(adapter));
8299 if (adapter->chip_params == NULL)
8302 adapter->params.pci.vpd_cap_addr =
8303 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
8305 ret = t4_get_flash_params(adapter);
8309 /* Cards with real ASICs have the chipid in the PCIe device id */
8310 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
8311 if (device_id >> 12 == chip_id(adapter))
8312 adapter->params.cim_la_size = CIMLA_SIZE;
/* Otherwise this is an FPGA emulation platform with a doubled CIM LA. */
8315 adapter->params.fpga = 1;
8316 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
8319 ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
8323 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8326 * Default port and clock for debugging in case we can't reach FW.
8328 adapter->params.nports = 1;
8329 adapter->params.portvec = 1;
8330 adapter->params.vpd.cclk = 50000;
8332 /* Set pci completion timeout value to 4 seconds. */
8333 set_pcie_completion_timeout(adapter, 0xd);
8338 * t4_shutdown_adapter - shut down adapter, host & wire
8339 * @adapter: the adapter
8341 * Perform an emergency shutdown of the adapter and stop it from
8342 * continuing any further communication on the ports or DMA to the
8343 * host. This is typically used when the adapter and/or firmware
8344 * have crashed and we want to prevent any further accidental
8345 * communication with the rest of the world. This will also force
8346 * the port Link Status to go down -- if register writes work --
8347 * which should help our peers figure out that we're down.
8349 int t4_shutdown_adapter(struct adapter *adapter)
8353 t4_intr_disable(adapter);
8354 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
/* Clear SIGNAL_DET on every MAC port; the register block moved between T4 and T5+. */
8355 for_each_port(adapter, port) {
8356 u32 a_port_cfg = is_t4(adapter) ?
8357 PORT_REG(port, A_XGMAC_PORT_CFG) :
8358 T5_PORT_REG(port, A_MAC_PORT_CFG);
8360 t4_write_reg(adapter, a_port_cfg,
8361 t4_read_reg(adapter, a_port_cfg)
8362 & ~V_SIGNAL_DET(1));
/* Finally disable the SGE entirely, stopping all DMA to/from the host. */
8364 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
8370 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8371 * @adapter: the adapter
8372 * @qid: the Queue ID
8373 * @qtype: the Ingress or Egress type for @qid
8374 * @user: true if this request is for a user mode queue
8375 * @pbar2_qoffset: BAR2 Queue Offset
8376 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8378 * Returns the BAR2 SGE Queue Registers information associated with the
8379 * indicated Absolute Queue ID. These are passed back in return value
8380 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8381 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8383 * This may return an error which indicates that BAR2 SGE Queue
8384 * registers aren't available. If an error is not returned, then the
8385 * following values are returned:
8387 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8388 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8390 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8391 * require the "Inferred Queue ID" ability may be used. E.g. the
8392 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8393 * then these "Inferred Queue ID" register may not be used.
int t4_bar2_sge_qregs(struct adapter *adapter,
		      enum t4_bar2_qtype qtype,
		      unsigned int *pbar2_qid)
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel-mode
	 * queues.
	 */
	if (!user && is_t4(adapter))

	/* Get our SGE Page Size parameters. */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue. */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;

	/* Hand the results back through the return-value pointers. */
	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
8462 * t4_init_devlog_params - initialize adapter->params.devlog
8463 * @adap: the adapter
8464 * @fw_attach: whether we can talk to the firmware
8466 * Initialize various fields of the adapter's Firmware Device Log
8467 * Parameters structure.
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
	struct devlog_params *dparams = &adap->params.devlog;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
		unsigned int nentries, nentries128;

		/* Decode memory type and 16-byte-unit start address. */
		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* Entry count is encoded in units of 128 entries, minus 1. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),

	/* Unpack memory type/address and size from the mailbox response. */
	be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8529 * t4_init_sge_params - initialize adap->params.sge
8530 * @adapter: the adapter
8532 * Initialize various fields of the adapter's SGE Parameters structure.
int t4_init_sge_params(struct adapter *adapter)
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt holdoff packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ scales the SGE timers by TSCALE; fold it into the values. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);

	/* Interrupt holdoff timer values, converted to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing-threshold field moved
	 * between chip generations.
	 */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; field holds log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Cache SGE_CONTROL and derive status-page length, packet shift and
	 * the ingress padding boundary (T6 uses a different shift base).
	 */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);

	/* Packing boundary: defaults to the pad boundary; T5+ can override
	 * it via SGE_CONTROL2 (a raw field of 0 means 16 bytes).
	 */
	sp->pack_boundary = sp->pad_boundary;
	r = t4_read_reg(adapter, A_SGE_CONTROL2);
	if (G_INGPACKBOUNDARY(r) == 0)
		sp->pack_boundary = 16;
	sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);

	/* Cache the configured free-list buffer sizes. */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));
8619 * Read and cache the adapter's compressed filter mode and ingress config.
static void read_filter_mode_and_ingress_config(struct adapter *adap,
	struct tp_params *tpp = &adap->params.tp;

	/* Cache the raw filter mode and ingress config registers. */
	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;	/* -1 == field not usable as VNIC ID */
8656 * t4_init_tp_params - initialize adap->params.tp
8657 * @adap: the adapter
8659 * Initialize various fields of the adapter's TP Parameters structure.
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
	struct tp_params *tpp = &adap->params.tp;

	/* Cache the TP timer and delayed-ACK resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	/* Cache the filter mode and derived tuple field shifts. */
	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			/* Compressed error vector in use: narrow the mask. */
			htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
8695 * t4_filter_field_shift - calculate filter field shift
8696 * @adap: the adapter
8697 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8699 * Return the shift position of a filter field within the Compressed
8700 * Filter Tuple. The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;

	/* Field not present in the current filter mode. */
	if ((filter_mode & filter_sel) == 0)

	/* Walk the selection bits below @filter_sel and accumulate the
	 * widths of every enabled field; the sum is @filter_sel's bit
	 * offset within the Compressed Filter Tuple.
	 */
	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
			field_shift += W_FT_FCOE;
			field_shift += W_FT_PORT;
			field_shift += W_FT_VNIC_ID;
			field_shift += W_FT_VLAN;
			field_shift += W_FT_TOS;
			field_shift += W_FT_PROTOCOL;
			field_shift += W_FT_ETHERTYPE;
			field_shift += W_FT_MACMATCH;
			field_shift += W_FT_MPSHITTYPE;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
	struct port_info *p = adap2pinfo(adap, port_id);

	/* Map the port index onto the (port_id+1)'th set bit of
	 * params.portvec; j ends up as the physical channel for this port.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		} while ((adap->params.portvec & (1 << j)) == 0);

	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);

	/* VFs may lack the PORT capability; only query port info as a PF
	 * or when the capability is present.
	 */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		t4_update_port_info(p);

	/* Allocate a virtual interface on channel j for this port. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);

	p->vi[0].viid = ret;
	/* The SMT index encoding changed between T5 and T6. */
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
		p->vi[0].smt_idx = (ret & 0x7f);
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(p, addr);

	/* Ask the firmware for this VI's RSS slice base. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	p->vi[0].rss_base = 0xffff;	/* sentinel: RSS base unknown */
	/* MPASS((val >> 16) == rss_size); */
	p->vi[0].rss_base = val & 0xffff;
8800 * t4_read_cimq_cfg - read CIM queue configuration
8801 * @adap: the adapter
8802 * @base: holds the queue base addresses in bytes
8803 * @size: holds the queue sizes in bytes
8804 * @thres: holds the queue full thresholds in bytes
8806 * Returns the current configuration of the CIM queues, starting with
8807 * the IBQs, then the OBQs.
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/* IBQs first: select each queue, then read its configuration. */
	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	/* Then the OBQs (count varies per chip); OBQs have no threshold. */
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
8834 * t4_read_cim_ibq - read the contents of a CIM inbound queue
8835 * @adap: the adapter
8836 * @qid: the queue index
8837 * @data: where to store the queue contents
8838 * @n: capacity of @data in 32-bit words
8840 * Reads the contents of the selected CIM queue starting at address 0 up
8841 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8842 * error and the number of 32-bit words actually read on success.
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
	int i, err, attempts;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist and @n must be a multiple of 4 words. */
	if (qid > 5 || (n & 3))

	addr = qid * nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */

	/* One word at a time: program the debug address, wait for the
	 * read to complete, then latch the data register.
	 */
	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	/* Disable debug access again on the way out. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8876 * t4_read_cim_obq - read the contents of a CIM outbound queue
8877 * @adap: the adapter
8878 * @qid: the queue index
8879 * @data: where to store the queue contents
8880 * @n: capacity of @data in 32-bit words
8882 * Reads the contents of the selected CIM queue starting at address 0 up
8883 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
8884 * error and the number of 32-bit words actually read on success.
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/* Validate the queue index (chip-dependent) and word alignment. */
	if ((qid > (cim_num_obq - 1)) || (n & 3))

	/* Look up this OBQ's base and size from the queue config. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
	    V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */

	/* One word at a time via the OBQ debug interface. */
	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	/* Disable debug access again on the way out. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	/* Base offsets within the CIM internal address space.  CIM_CTL_BASE
	 * is used by t4_cim_ctl_read(); the PBT bases presumably address the
	 * PBT subregions -- not referenced in this chunk.
	 */
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
8926 * t4_cim_read - read a block from CIM internal address space
8927 * @adap: the adapter
8928 * @addr: the start address within the CIM address space
8929 * @n: number of words to read
8930 * @valp: where to store the result
 * Reads a block of 4-byte words from the CIM internal address space.
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
	/* Fail fast if a previous host access is still in flight. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)

	/* One word per iteration: program the address, wait for HOSTBUSY
	 * to clear, then latch the data register.
	 */
	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
		*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8953 * t4_cim_write - write a block into CIM internal address space
8954 * @adap: the adapter
8955 * @addr: the start address within the CIM address space
8956 * @n: number of words to write
8957 * @valp: set of values to write
 * Writes a block of 4-byte words into the CIM internal address space.
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
	/* Fail fast if a previous host access is still in flight. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)

	/* Write the data word first, then the address with HOSTWRITE set
	 * to kick off the operation, and wait for completion.
	 */
	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single word into CIM internal space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
	return t4_cim_write(adap, addr, 1, &val);
8985 * t4_cim_ctl_read - read a block from CIM control region
8986 * @adap: the adapter
8987 * @addr: the start address within the CIM control region
8988 * @n: number of words to read
8989 * @valp: where to store the result
8991 * Reads a block of 4-byte words from the CIM control region.
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
	/* The control region is plain CIM space at a fixed base offset. */
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
9000 * t4_cim_read_la - read CIM LA capture buffer
9001 * @adap: the adapter
9002 * @la_buf: where to store the LA data
9003 * @wrptr: the HW write pointer within the capture buffer
9005 * Reads the contents of the CIM LA buffer with the most recent entry at
9006 * the end of the returned data and with the entry at @wrptr first.
9007 * We try to leave the LA in the running state we find it in.
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);

	/* Start reading from the hardware write pointer so the oldest
	 * entry comes out first.
	 */
	idx = G_UPDBGLAWRPTR(val);

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Program the read pointer and request one entry. */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
		    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (val & F_UPDBGLARDEN) {
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		while ((idx & 0xf) > 9)
			/* NOTE(review): '%' here looks inconsistent with the
			 * '&' wrap just above -- M_UPDBGLARDPTR is a bit
			 * mask, not a modulus.  Harmless while idx < 0xfff,
			 * but confirm intent before changing.
			 */
			idx = (idx + 1) % M_UPDBGLARDPTR;

	if (cfg & F_UPDBGLAEN) {
		/* LA was running when we arrived: restore that state. */
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
		    cfg & ~F_UPDBGLARDEN);
9069 * t4_tp_read_la - read TP LA capture buffer
9070 * @adap: the adapter
9071 * @la_buf: where to store the LA data
9072 * @wrptr: the HW write pointer within the capture buffer
9074 * Reads the contents of the TP LA buffer with the most recent entry at
9075 * the end of the returned data and with the entry at @wrptr first.
9076 * We leave the LA in the running state we find it in.
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
		    adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In capture modes >= 2 the last entry may be only half-written. */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;

	/* Build the config template for reads: keep the LA mask, clear the
	 * read pointer field (we supply it each iteration).
	 */
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
		    cfg | adap->params.tp.la_mask);
9116 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9117 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9118 * state for more than the Warning Threshold then we'll issue a warning about
9119 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
9120 * appears to be hung every Warning Repeat second till the situation clears.
9121 * If the situation clears, we'll note that as well.
9123 #define SGE_IDMA_WARN_THRESH 1
9124 #define SGE_IDMA_WARN_REPEAT 300
9127 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9128 * @adapter: the adapter
9129 * @idma: the adapter IDMA Monitor state
9131 * Initialize the state of an SGE Ingress DMA Monitor.
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
9153 * t4_idma_monitor - monitor SGE Ingress DMA state
9154 * @adapter: the adapter
9155 * @idma: the adapter IDMA Monitor state
9156 * @hz: number of ticks/second
9157 * @ticks: number of ticks since the last IDMA Monitor call
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* Evaluate both IDMA channels independently. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
				    "resumed after %d seconds\n",
				    i, idma->idma_qid[i],
				    idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
		    " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
		    i, idma->idma_qid[i], idma->idma_state[i],
		    idma->idma_stalled[i]/hz,
		/* Dump a human-readable decode of the stuck IDMA state. */
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9243 * t4_read_pace_tbl - read the pace table
9244 * @adap: the adapter
9245 * @pace_vals: holds the returned values
9247 * Returns the values of TP's pace table in microseconds.
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
	/* Indirect access: writing 0xffff0000+i selects entry i, which is
	 * then read back through the same register.
	 */
	for (i = 0; i < NTX_SCHED; i++) {
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
9261 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9262 * @adap: the adapter
9263 * @sched: the scheduler index
9264 * @kbps: the byte rate in Kbps
9265 * @ipg: the interpacket delay in tenths of nanoseconds
9267 * Return the current configuration of a HW Tx scheduler.
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
	unsigned int v, addr, bpt, cpt;

	/* The rate-limit registers are shared by scheduler pairs; pick the
	 * word that holds this scheduler's settings.
	 */
	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);

	bpt = (v >> 8) & 0xff;		/* bytes per tick */
	*kbps = 0;	/* scheduler disabled */
	v = (adap->params.vpd.cclk * 1000) / cpt;	/* ticks/s */
	*kbps = (v * bpt) / 125;	/* bytes/s -> Kbps (x8 / 1000) */

	/* Inter-packet gap comes from the timer-separator register pair. */
	addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
	t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);

	/* Convert core-clock ticks to tenths of nanoseconds. */
	*ipg = (10000 * v) / core_ticks_per_usec(adap);
9299 * t4_load_cfg - download config file
9300 * @adap: the adapter
9301 * @cfg_data: the cfg text file to write
9302 * @size: text file size
9304 * Write the supplied config text file to the card's serial flash.
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
	int ret, i, n, cfg_addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/* Locate the config-file region in flash. */
	cfg_addr = t4_flash_cfg_addr(adap);
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		    FLASH_CFG_MAX_SIZE);

	/* Erase every sector the config region spans before writing. */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
	    flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		if ( (size - i) < SF_PAGE_SIZE)

		ret = t4_write_flash(adap, addr, n, cfg_data, 1);

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;

	/* Common error-report path for both erase and write failures. */
	CH_ERR(adap, "config file %s failed %d\n",
	    (size == 0 ? "clear" : "download"), ret);
9359 * t5_fw_init_extern_mem - initialize the external memory
9360 * @adap: the adapter
9362 * Initializes the external memory on T5.
int t5_fw_init_extern_mem(struct adapter *adap)
	u32 params[1], val[1];

	val[0] = 0xff;		/* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	/* MC initialization can take a while; allow the firmware the
	 * longest mailbox command timeout.
	 */
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
	    FW_CMD_MAX_TIMEOUT);
/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */
/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */
/* BOOT constants */
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* size512 is 1 byte => max 1024
					     * length increments (512KB) */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
9453 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
9455 * @boot_data: the boot image to modify.
9457 * Write the supplied device ID to the boot BIOS image.
static void modify_device_id(int device_id, u8 *boot_data)
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;

	/*
	 * Loop through all chained images and change the device ID's
	 */
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		/* The PCIR structure sits at the offset named in the image
		 * header (little-endian 16-bit value).
		 */
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Writing new checksum value directly to the boot data
			 * (byte 7 of the legacy header is the cksum field).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
/**
9532 * t4_load_boot - download boot flash
9533 * @adapter: the adapter
9534 * @boot_data: the boot image to write
9535 * @boot_addr: offset in flash to write boot_data
9538 * Write the supplied boot image to the card's serial flash.
9539 * The boot image has the following sections: a 28-byte header and the
 */
9542 int t4_load_boot(struct adapter *adap, u8 *boot_data,
9543 unsigned int boot_addr, unsigned int size)
/* NOTE(review): interior lines appear to be missing from this extract
 * (opening brace, declarations of ret/addr/i/pcir_offset/device_id,
 * error returns, #endif, and closing braces) — confirm against the full
 * source. */
9545 pci_exp_rom_header_t *header;
9547 pcir_data_t *pcir_header;
/* boot_addr is given in KB units; convert to a byte offset into flash. */
9551 unsigned int boot_sector = (boot_addr * 1024 );
9552 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/*
9555 * Make sure the boot image does not encroach on the firmware region
 */
9557 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
9558 CH_ERR(adap, "boot image encroaching on firmware region\n");
/*
9563 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
9564 * and Boot configuration data sections. These 3 boot sections span
9565 * sectors 0 to 7 in flash and live right before the FW image location.
 */
9567 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
9569 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
9570 (boot_sector >> 16) + i - 1);
/*
9573 * If size == 0 then we're simply erasing the FLASH sectors associated
9574 * with the on-adapter option ROM file
 */
9576 if (ret || (size == 0))
9579 /* Get boot header */
9580 header = (pci_exp_rom_header_t *)boot_data;
9581 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
9582 /* PCIR Data Structure */
9583 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
/*
9586 * Perform some primitive sanity testing to avoid accidentally
9587 * writing garbage over the boot sectors. We ought to check for
9588 * more but it's not worth it for now ...
 */
9590 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
9591 CH_ERR(adap, "boot image too small/large\n");
9595 #ifndef CHELSIO_T4_DIAGS
/*
9597 * Check BOOT ROM header signature
 */
9599 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
9600 CH_ERR(adap, "Boot image missing signature\n");
/*
9605 * Check PCI header signature
 */
9607 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
9608 CH_ERR(adap, "PCI header missing signature\n");
/*
9613 * Check Vendor ID matches Chelsio ID
 */
9615 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
9616 CH_ERR(adap, "Vendor ID missing signature\n");
/*
9622 * Retrieve adapter's device ID
 */
9624 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
9625 /* Want to deal with PF 0 so I strip off PF 4 indicator */
9626 device_id = device_id & 0xf0ff;
/*
9629 * Check PCIE Device ID
 */
9631 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
/*
9633 * Change the device ID in the Boot BIOS image to match
9634 * the Device ID of the current adapter.
 */
9636 modify_device_id(device_id, boot_data);
/*
9640 * Skip over the first SF_PAGE_SIZE worth of data and write it after
9641 * we finish copying the rest of the boot image. This will ensure
9642 * that the BIOS boot header will only be written if the boot image
9643 * was written in full.
 */
9646 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
9647 addr += SF_PAGE_SIZE;
9648 boot_data += SF_PAGE_SIZE;
9649 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
/* Write the header page last, per the comment above. */
9654 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
9655 (const u8 *)header, 0);
9659 CH_ERR(adap, "boot image download failed, error %d\n", ret);
/**
9664 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9665 * @adapter: the adapter
9667 * Return the address within the flash where the OptionROM Configuration
9668 * is stored, or an error if the device FLASH is too small to contain
9669 * a OptionROM Configuration.
 */
9671 static int t4_flash_bootcfg_addr(struct adapter *adapter)
/* NOTE(review): the opening brace and the error-return line appear to be
 * missing from this extract — confirm against the full source. */
/*
9674 * If the device FLASH isn't large enough to hold a Firmware
9675 * Configuration File, return an error.
 */
9677 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9680 return FLASH_BOOTCFG_START;
/*
 * t4_load_bootcfg - write (or, with size == 0, just erase) the OptionROM
 * Configuration region of the adapter's serial flash.
 * @adap: the adapter
 * @cfg_data: the configuration data to write
 * @size: size of @cfg_data in bytes
 */
9683 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
/* NOTE(review): interior lines appear to be missing from this extract
 * (opening brace, the "addr" declaration/assignment from cfg_addr, error
 * returns, the "n" assignments in the write loop, and closing braces) —
 * confirm against the full source. */
9685 int ret, i, n, cfg_addr;
9687 unsigned int flash_cfg_start_sec;
9688 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9690 cfg_addr = t4_flash_bootcfg_addr(adap);
9695 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9697 if (size > FLASH_BOOTCFG_MAX_SIZE) {
9698 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9699 FLASH_BOOTCFG_MAX_SIZE);
/* Erase every sector the bootcfg region can span before writing. */
9703 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9705 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9706 flash_cfg_start_sec + i - 1);
/*
9709 * If size == 0 then we're simply erasing the FLASH sectors associated
9710 * with the on-adapter OptionROM Configuration File.
 */
9712 if (ret || size == 0)
9715 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9716 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9717 if ( (size - i) < SF_PAGE_SIZE)
9721 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9725 addr += SF_PAGE_SIZE;
9726 cfg_data += SF_PAGE_SIZE;
9731 CH_ERR(adap, "boot config data %s failed %d\n",
9732 (size == 0 ? "clear" : "download"), ret);
/**
9737 * t4_set_filter_mode - configure the optional components of filter tuples
9738 * @adap: the adapter
9739 * @mode_map: a bitmap selecting which optional filter components to enable
9740 * @sleep_ok: if true we may sleep while awaiting command completion
9742 * Sets the filter mode by selecting the optional components to enable
9743 * in filter tuples. Returns 0 on success and a negative error if the
9744 * requested mode needs more bits than are available for optional
 */
9747 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
/* NOTE(review): the second parameter line (sleep_ok), opening brace,
 * declarations of i/nbits, the width accumulation line, and return
 * statements appear to be missing from this extract — confirm against
 * the full source. */
/* Bit widths of the optional filter-tuple fields, one entry per field. */
9750 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Walk the optional fields selected in mode_map ... */
9754 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9755 if (mode_map & (1 << i))
/* ... and reject the mode if it needs more bits than are available. */
9757 if (nbits > FILTER_OPT_LEN)
9759 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
/* Refresh the cached filter-mode / ingress-config state. */
9760 read_filter_mode_and_ingress_config(adap, sleep_ok);
/**
9766 * t4_clr_port_stats - clear port statistics
9767 * @adap: the adapter
9768 * @idx: the port index
9770 * Clear HW statistics for the given port.
 */
9772 void t4_clr_port_stats(struct adapter *adap, int idx)
/* NOTE(review): interior lines appear to be missing from this extract
 * (opening brace, declarations of i and port_base_addr, the chip test
 * choosing between PORT_BASE and T5_PORT_BASE, the t4_write_reg call
 * openers in the final loop, and closing braces) — confirm against the
 * full source. */
9775 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
9779 port_base_addr = PORT_BASE(idx);
9781 port_base_addr = T5_PORT_BASE(idx);
/* Zero the per-port TX statistics registers. */
9783 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9784 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9785 t4_write_reg(adap, port_base_addr + i, 0);
/* Zero the per-port RX statistics registers. */
9786 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9787 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9788 t4_write_reg(adap, port_base_addr + i, 0);
/* Zero the drop/truncate counters of each buffer group this port maps to. */
9789 for (i = 0; i < 4; i++)
9790 if (bgmap & (1 << i)) {
9792 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9794 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
/**
9799 * t4_i2c_rd - read I2C data from adapter
9800 * @adap: the adapter
9801 * @port: Port number if per-port device; <0 if not
9802 * @devid: per-port device ID or absolute device ID
9803 * @offset: byte offset into device I2C space
9804 * @len: byte length of I2C space data
9805 * @buf: buffer in which to return I2C data
9807 * Reads the I2C data from the indicated device and location.
 */
9809 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9810 int port, unsigned int devid,
9811 unsigned int offset, unsigned int len,
/* NOTE(review): the buffer parameter line, opening brace, local
 * declarations (ret, ldst_addrspace) and the range-check conditions
 * preceding line 9821 appear to be missing from this extract — confirm
 * against the full source. */
9815 struct fw_ldst_cmd ldst;
/* Reject transfers larger than the FW LDST i2c data buffer. */
9821 len > sizeof ldst.u.i2c.data)
9824 memset(&ldst, 0, sizeof ldst);
9825 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9826 ldst.op_to_addrspace =
9827 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9831 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff selects "not a per-port device". */
9832 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9833 ldst.u.i2c.did = devid;
9834 ldst.u.i2c.boffset = offset;
9835 ldst.u.i2c.blen = len;
9836 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
/* Copy the returned bytes out to the caller's buffer. */
9838 memcpy(buf, ldst.u.i2c.data, len);
/**
9843 * t4_i2c_wr - write I2C data to adapter
9844 * @adap: the adapter
9845 * @port: Port number if per-port device; <0 if not
9846 * @devid: per-port device ID or absolute device ID
9847 * @offset: byte offset into device I2C space
9848 * @len: byte length of I2C space data
9849 * @buf: buffer containing new I2C data
9851 * Write the I2C data to the indicated device and location.
 */
9853 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9854 int port, unsigned int devid,
9855 unsigned int offset, unsigned int len,
/* NOTE(review): the buffer parameter line, opening brace, local
 * declarations and the range-check conditions preceding line 9864
 * appear to be missing from this extract — confirm against the full
 * source. */
9859 struct fw_ldst_cmd ldst;
/* Reject transfers larger than the FW LDST i2c data buffer. */
9864 len > sizeof ldst.u.i2c.data)
9867 memset(&ldst, 0, sizeof ldst);
9868 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9869 ldst.op_to_addrspace =
9870 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9874 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
/* pid 0xff selects "not a per-port device". */
9875 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9876 ldst.u.i2c.did = devid;
9877 ldst.u.i2c.boffset = offset;
9878 ldst.u.i2c.blen = len;
/* Copy the caller's bytes into the command payload before issuing. */
9879 memcpy(ldst.u.i2c.data, buf, len);
9880 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
/**
9884 * t4_sge_ctxt_rd - read an SGE context through FW
9885 * @adap: the adapter
9886 * @mbox: mailbox to use for the FW command
9887 * @cid: the context id
9888 * @ctype: the context type
9889 * @data: where to store the context data
9891 * Issues a FW command through the given mailbox to read an SGE context.
 */
9893 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9894 enum ctxt_type ctype, u32 *data)
/* NOTE(review): opening brace, the "int ret;" declaration, the success
 * check before the data copy-out, and the final return / closing brace
 * appear to be missing from this extract — confirm against the full
 * source. */
9897 struct fw_ldst_cmd c;
/* Map the context type to the corresponding FW LDST address space.
 * (ret is reused first as the address space, then as the mbox result.) */
9899 if (ctype == CTXT_EGRESS)
9900 ret = FW_LDST_ADDRSPC_SGE_EGRC;
9901 else if (ctype == CTXT_INGRESS)
9902 ret = FW_LDST_ADDRSPC_SGE_INGC;
9903 else if (ctype == CTXT_FLM)
9904 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9906 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9908 memset(&c, 0, sizeof(c));
9909 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9910 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9911 V_FW_LDST_CMD_ADDRSPACE(ret));
9912 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9913 c.u.idctxt.physid = cpu_to_be32(cid);
9915 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Unpack the six 32-bit context words for the caller. */
9917 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9918 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9919 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9920 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9921 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9922 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
/**
9928 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9929 * @adap: the adapter
9930 * @cid: the context id
9931 * @ctype: the context type
9932 * @data: where to store the context data
9934 * Reads an SGE context directly, bypassing FW. This is only for
9935 * debugging when FW is unavailable.
 */
9937 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
/* NOTE(review): the "u32 *data" parameter line, opening brace, local
 * declarations (i, ret), the success check before the copy loop and the
 * final return / closing brace appear to be missing from this extract. */
/* Kick off the context read and poll the BUSY bit until it clears. */
9942 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9943 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
/* Copy the context data registers out to the caller. */
9945 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9946 *data++ = t4_read_reg(adap, i);
/*
 * t4_sched_config - issue a FW_SCHED_CMD "config" sub-command.
 * @adapter: the adapter
 * @type: scheduler type
 * @minmaxen: min/max rate enable setting
 */
9950 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
/* NOTE(review): the trailing parameter line(s), opening brace, the
 * command-flag continuation of op_to_write, the final arguments of
 * t4_wr_mbox_meat and the closing brace appear to be missing from this
 * extract — confirm against the full source. */
9953 struct fw_sched_cmd cmd;
9955 memset(&cmd, 0, sizeof(cmd));
9956 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9959 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9961 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9962 cmd.u.config.type = type;
9963 cmd.u.config.minmaxen = minmaxen;
9965 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params - issue a FW_SCHED_CMD "params" sub-command carrying
 * the full set of scheduling-class parameters.
 */
9969 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9970 int rateunit, int ratemode, int channel, int cl,
9971 int minrate, int maxrate, int weight, int pktsize,
/* NOTE(review): the trailing parameter line(s), opening brace, the
 * command-flag continuation of op_to_write, the final arguments of
 * t4_wr_mbox_meat and the closing brace appear to be missing from this
 * extract — confirm against the full source. */
9974 struct fw_sched_cmd cmd;
9976 memset(&cmd, 0, sizeof(cmd));
9977 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9980 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9982 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9983 cmd.u.params.type = type;
9984 cmd.u.params.level = level;
9985 cmd.u.params.mode = mode;
9986 cmd.u.params.ch = channel;
9987 cmd.u.params.cl = cl;
9988 cmd.u.params.unit = rateunit;
9989 cmd.u.params.rate = ratemode;
/* Multi-byte rate/weight/pktsize fields are sent big-endian. */
9990 cmd.u.params.min = cpu_to_be32(minrate);
9991 cmd.u.params.max = cpu_to_be32(maxrate);
9992 cmd.u.params.weight = cpu_to_be16(weight);
9993 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9995 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_ch_rl - configure a per-channel rate limit via
 * FW_SCHED_CMD at the channel rate-limit (CH_RL) level.
 */
9999 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
10000 unsigned int maxrate, int sleep_ok)
/* NOTE(review): the opening brace, the command-flag continuation of
 * op_to_write, the final arguments of t4_wr_mbox_meat and the closing
 * brace appear to be missing from this extract — confirm against the
 * full source. */
10002 struct fw_sched_cmd cmd;
10004 memset(&cmd, 0, sizeof(cmd));
10005 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10008 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10010 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10011 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10012 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
10013 cmd.u.params.ch = channel;
10014 cmd.u.params.rate = ratemode; /* REL or ABS */
10015 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
10017 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_cl_wrr - set the weighted-round-robin weight of a
 * scheduling class via FW_SCHED_CMD at the CL_WRR level.
 */
10021 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
10022 int weight, int sleep_ok)
/* NOTE(review): the opening brace, the error-return line for an
 * out-of-range weight, the command-flag continuation of op_to_write,
 * the final arguments of t4_wr_mbox_meat and the closing brace appear
 * to be missing from this extract — confirm against the full source. */
10024 struct fw_sched_cmd cmd;
/* Weight is a percentage-style value; anything outside 0..100 is rejected. */
10026 if (weight < 0 || weight > 100)
10029 memset(&cmd, 0, sizeof(cmd));
10030 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10033 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10035 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10036 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10037 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
10038 cmd.u.params.ch = channel;
10039 cmd.u.params.cl = cl;
10040 cmd.u.params.weight = cpu_to_be16(weight);
10042 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/*
 * t4_sched_params_cl_rl_kbps - apply an absolute bit-rate limit to a
 * scheduling class via FW_SCHED_CMD at the CL_RL level.
 */
10046 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
10047 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
/* NOTE(review): the opening brace, the command-flag continuation of
 * op_to_write, the final arguments of t4_wr_mbox_meat and the closing
 * brace appear to be missing from this extract — confirm against the
 * full source. */
10049 struct fw_sched_cmd cmd;
10051 memset(&cmd, 0, sizeof(cmd));
10052 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10055 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10057 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10058 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10059 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
10060 cmd.u.params.mode = mode;
10061 cmd.u.params.ch = channel;
10062 cmd.u.params.cl = cl;
/* Absolute bit-rate limiting: unit BITRATE, rate mode ABS. */
10063 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
10064 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
10065 cmd.u.params.max = cpu_to_be32(maxrate);
10066 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10068 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
/**
10073 * t4_config_watchdog - configure (enable/disable) a watchdog timer
10074 * @adapter: the adapter
10075 * @mbox: mailbox to use for the FW command
10076 * @pf: the PF owning the queue
10077 * @vf: the VF owning the queue
10078 * @timeout: watchdog timeout in ms
10079 * @action: watchdog timer / action
10081 * There are separate watchdog timers for each possible watchdog
10082 * action. Configure one of the watchdog timers by setting a non-zero
10083 * timeout. Disable a watchdog timer by using a timeout of zero.
 */
10085 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
10086 unsigned int pf, unsigned int vf,
10087 unsigned int timeout, unsigned int action)
/* NOTE(review): the opening brace, the minimum-tick assignment under the
 * "if (timeout && !ticks)" test, and the command-flag continuation of
 * op_to_vfn appear to be missing from this extract — confirm against
 * the full source. */
10089 struct fw_watchdog_cmd wdog;
10090 unsigned int ticks;
/*
10093 * The watchdog command expects a timeout in units of 10ms so we need
10094 * to convert it here (via rounding) and force a minimum of one 10ms
10095 * "tick" if the timeout is non-zero but the conversion results in 0
 */
10098 ticks = (timeout + 5)/10;
10099 if (timeout && !ticks)
10102 memset(&wdog, 0, sizeof wdog);
10103 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
10106 V_FW_PARAMS_CMD_PFN(pf) |
10107 V_FW_PARAMS_CMD_VFN(vf));
10108 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
10109 wdog.timeout = cpu_to_be32(ticks);
10110 wdog.action = cpu_to_be32(action);
10112 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
/*
 * t4_get_devlog_level - read the firmware device-log level.
 * @adapter: the adapter
 * @level: where to store the current level on success
 */
10115 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
/* NOTE(review): the opening brace, "int ret;" declaration, the error
 * check between the mailbox call and the level copy-out, and the final
 * return / closing brace appear to be missing from this extract —
 * confirm against the full source. */
10117 struct fw_devlog_cmd devlog_cmd;
10120 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10121 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10122 F_FW_CMD_REQUEST | F_FW_CMD_READ);
10123 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10124 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10125 sizeof(devlog_cmd), &devlog_cmd);
/* Hand the level from the FW reply back to the caller. */
10129 *level = devlog_cmd.level;
10133 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
10135 struct fw_devlog_cmd devlog_cmd;
10137 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10138 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10141 devlog_cmd.level = level;
10142 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10143 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10144 sizeof(devlog_cmd), &devlog_cmd);