2 * Copyright (c) 2011 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include "t4_regs_values.h"
33 #include "firmware/t4fw_interface.h"
36 #define msleep(x) pause("t4hw", (x) * hz / 1000)
39 * t4_wait_op_done_val - wait until an operation is completed
40 * @adapter: the adapter performing the operation
41 * @reg: the register to check for completion
42 * @mask: a single-bit field within @reg that indicates completion
43 * @polarity: the value of the field when the operation is completed
44 * @attempts: number of check iterations
45 * @delay: delay in usecs between iterations
46 * @valp: where to store the value of the register at completion time
48 * Wait until an operation is completed by checking a bit in a register
49 * up to @attempts times. If @valp is not NULL the value of the register
50 * at the time it indicated completion is stored there. Returns 0 if the
51 * operation completes and -EAGAIN otherwise.
53 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t4_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t4_set_reg_field - set a register field to a value
73 * @adapter: the adapter to program
74 * @addr: the register address
75 * @mask: specifies the portion of the register to modify
76 * @val: the new value for the register field
78 * Sets a register field specified by the supplied mask to the
81 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
84 u32 v = t4_read_reg(adapter, addr) & ~mask;
86 t4_write_reg(adapter, addr, v | val);
87 (void) t4_read_reg(adapter, addr); /* flush */
91 * t4_read_indirect - read indirectly addressed registers
93 * @addr_reg: register holding the indirect address
94 * @data_reg: register holding the value of the indirect register
95 * @vals: where the read register values are stored
96 * @nregs: how many indirect registers to read
97 * @start_idx: index of first indirect register to read
99 * Reads registers that are accessed indirectly through an address/data
102 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
103 unsigned int data_reg, u32 *vals, unsigned int nregs,
104 unsigned int start_idx)
107 t4_write_reg(adap, addr_reg, start_idx);
108 *vals++ = t4_read_reg(adap, data_reg);
114 * t4_write_indirect - write indirectly addressed registers
116 * @addr_reg: register holding the indirect addresses
117 * @data_reg: register holding the value for the indirect registers
118 * @vals: values to write
119 * @nregs: how many indirect registers to write
120 * @start_idx: address of first indirect register to write
122 * Writes a sequential block of registers that are accessed indirectly
123 * through an address/data register pair.
125 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
126 unsigned int data_reg, const u32 *vals,
127 unsigned int nregs, unsigned int start_idx)
130 t4_write_reg(adap, addr_reg, start_idx++);
131 t4_write_reg(adap, data_reg, *vals++);
136 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
138 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
141 for ( ; nflit; nflit--, mbox_addr += 8)
142 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
146 * Handle a FW assertion reported in a mailbox.
148 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
150 struct fw_debug_cmd asrt;
152 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
153 CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
154 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
155 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
158 #define X_CIM_PF_NOACCESS 0xeeeeeeee
160 * t4_wr_mbox_meat - send a command to FW through the given mailbox
162 * @mbox: index of the mailbox to use
163 * @cmd: the command to write
164 * @size: command length in bytes
165 * @rpl: where to optionally store the reply
166 * @sleep_ok: if true we may sleep while awaiting command completion
168 * Sends the given command to FW through the selected mailbox and waits
169 * for the FW to execute the command. If @rpl is not %NULL it is used to
170 * store the FW's reply to the command. The command and its optional
171 * reply are of the same length. Some FW commands like RESET and
172 * INITIALIZE can take a considerable amount of time to execute.
173 * @sleep_ok determines whether we may sleep while awaiting the response.
174 * If sleeping is allowed we use progressive backoff otherwise we spin.
176 * The return value is 0 on success or a negative errno on failure. A
177 * failure can happen either because we are not able to execute the
178 * command or FW executes it but signals an error. In the latter case
179 * the return value is the error code indicated by FW (negated).
181 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
182 void *rpl, bool sleep_ok)
185 * We delay in small increments at first in an effort to maintain
186 * responsiveness for simple, fast executing commands but then back
187 * off to larger delays to a maximum retry delay.
189 static const int delay[] = {
190 1, 1, 3, 5, 10, 10, 20, 50, 100
195 int i, ms, delay_idx;
196 const __be64 *p = cmd;
198 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
199 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
201 if ((size & 15) || size > MBOX_LEN)
204 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
205 for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
206 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
208 if (v != X_MBOWNER_PL)
209 return v ? -EBUSY : -ETIMEDOUT;
211 for (i = 0; i < size; i += 8, p++)
212 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
214 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
215 t4_read_reg(adap, ctl_reg); /* flush write */
220 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
222 ms = delay[delay_idx]; /* last element may repeat */
223 if (delay_idx < ARRAY_SIZE(delay) - 1)
229 v = t4_read_reg(adap, ctl_reg);
230 if (v == X_CIM_PF_NOACCESS)
232 if (G_MBOWNER(v) == X_MBOWNER_PL) {
233 if (!(v & F_MBMSGVALID)) {
234 t4_write_reg(adap, ctl_reg,
235 V_MBOWNER(X_MBOWNER_NONE));
239 res = t4_read_reg64(adap, data_reg);
240 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = V_FW_CMD_RETVAL(EIO);
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
245 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
246 return -G_FW_CMD_RETVAL((int)res);
250 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
251 *(const u8 *)cmd, mbox);
256 * t4_mc_read - read from MC through backdoor accesses
258 * @addr: address of first byte requested
259 * @data: 64 bytes of data containing the requested address
260 * @ecc: where to store the corresponding 64-bit ECC word
262 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
263 * that covers the requested address @addr. If @parity is not %NULL it
264 * is assigned the 64-bit ECC word for the read data.
266 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
270 if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
272 t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
273 t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
274 t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
275 t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
277 i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
281 #define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
283 for (i = 15; i >= 0; i--)
284 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
286 *ecc = t4_read_reg64(adap, MC_DATA(16));
292 * t4_edc_read - read from EDC through backdoor accesses
294 * @idx: which EDC to access
295 * @addr: address of first byte requested
296 * @data: 64 bytes of data containing the requested address
297 * @ecc: where to store the corresponding 64-bit ECC word
299 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
300 * that covers the requested address @addr. If @parity is not %NULL it
301 * is assigned the 64-bit ECC word for the read data.
303 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
308 if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
310 t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
311 t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
312 t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
313 t4_write_reg(adap, A_EDC_BIST_CMD + idx,
314 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
315 i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
319 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
321 for (i = 15; i >= 0; i--)
322 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
324 *ecc = t4_read_reg64(adap, EDC_DATA(16));
330 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
332 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
333 * @addr: address within indicated memory type
334 * @len: amount of memory to read
335 * @buf: host memory buffer
337 * Reads an [almost] arbitrary memory region in the firmware: the
338 * firmware memory address, length and host buffer must be aligned on
339 * 32-bit boudaries. The memory is returned as a raw byte sequence from
340 * the firmware's memory. If this memory contains data structures which
341 * contain multi-byte integers, it's the callers responsibility to
342 * perform appropriate byte order conversions.
344 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
347 u32 pos, start, end, offset;
351 * Argument sanity checks ...
353 if ((addr & 0x3) || (len & 0x3))
357 * The underlaying EDC/MC read routines read 64 bytes at a time so we
358 * need to round down the start and round up the end. We'll start
359 * copying out of the first line at (addr - start) a word at a time.
361 start = addr & ~(64-1);
362 end = (addr + len + 64-1) & ~(64-1);
363 offset = (addr - start)/sizeof(__be32);
365 for (pos = start; pos < end; pos += 64, offset = 0) {
369 * Read the chip's memory block and bail if there's an error.
372 ret = t4_mc_read(adap, pos, data, NULL);
374 ret = t4_edc_read(adap, mtype, pos, data, NULL);
379 * Copy the data into the caller's memory buffer.
381 while (offset < 16 && len > 0) {
382 *buf++ = data[offset++];
383 len -= sizeof(__be32);
391 * Partial EEPROM Vital Product Data structure. Includes only the ID and
403 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
405 #define EEPROM_MAX_RD_POLL 40
406 #define EEPROM_MAX_WR_POLL 6
407 #define EEPROM_STAT_ADDR 0x7bfc
408 #define VPD_BASE 0x400
409 #define VPD_BASE_OLD 0
411 #define VPD_INFO_FLD_HDR_SIZE 3
414 * t4_seeprom_read - read a serial EEPROM location
415 * @adapter: adapter to read
416 * @addr: EEPROM virtual address
417 * @data: where to store the read data
419 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
420 * VPD capability. Note that this function must be called with a virtual
423 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
426 int attempts = EEPROM_MAX_RD_POLL;
427 unsigned int base = adapter->params.pci.vpd_cap_addr;
429 if (addr >= EEPROMVSIZE || (addr & 3))
432 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
435 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
436 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
438 if (!(val & PCI_VPD_ADDR_F)) {
439 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
442 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
443 *data = le32_to_cpu(*data);
448 * t4_seeprom_write - write a serial EEPROM location
449 * @adapter: adapter to write
450 * @addr: virtual EEPROM address
451 * @data: value to write
453 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
454 * VPD capability. Note that this function must be called with a virtual
457 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
460 int attempts = EEPROM_MAX_WR_POLL;
461 unsigned int base = adapter->params.pci.vpd_cap_addr;
463 if (addr >= EEPROMVSIZE || (addr & 3))
466 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
468 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
469 (u16)addr | PCI_VPD_ADDR_F);
472 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
473 } while ((val & PCI_VPD_ADDR_F) && --attempts);
475 if (val & PCI_VPD_ADDR_F) {
476 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
483 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
484 * @phys_addr: the physical EEPROM address
485 * @fn: the PCI function number
486 * @sz: size of function-specific area
488 * Translate a physical EEPROM address to virtual. The first 1K is
489 * accessed through virtual addresses starting at 31K, the rest is
490 * accessed through virtual addresses starting at 0.
492 * The mapping is as follows:
493 * [0..1K) -> [31K..32K)
494 * [1K..1K+A) -> [ES-A..ES)
495 * [1K+A..ES) -> [0..ES-A-1K)
497 * where A = @fn * @sz, and ES = EEPROM size.
499 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
502 if (phys_addr < 1024)
503 return phys_addr + (31 << 10);
504 if (phys_addr < 1024 + fn)
505 return EEPROMSIZE - fn + phys_addr - 1024;
506 if (phys_addr < EEPROMSIZE)
507 return phys_addr - 1024 - fn;
512 * t4_seeprom_wp - enable/disable EEPROM write protection
513 * @adapter: the adapter
514 * @enable: whether to enable or disable write protection
516 * Enables or disables write protection on the serial EEPROM.
518 int t4_seeprom_wp(struct adapter *adapter, int enable)
520 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
524 * get_vpd_keyword_val - Locates an information field keyword in the VPD
525 * @v: Pointer to buffered vpd data structure
526 * @kw: The keyword to search for
528 * Returns the value of the information field keyword or
531 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
534 unsigned int offset , len;
535 const u8 *buf = &v->id_tag;
536 const u8 *vpdr_len = &v->vpdr_tag;
537 offset = sizeof(struct t4_vpd_hdr);
538 len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
540 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
544 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
545 if(memcmp(buf + i , kw , 2) == 0){
546 i += VPD_INFO_FLD_HDR_SIZE;
550 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
558 * get_vpd_params - read VPD parameters from VPD EEPROM
559 * @adapter: adapter to read
560 * @p: where to store the parameters
562 * Reads card parameters stored in VPD EEPROM.
564 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
568 u8 vpd[VPD_LEN], csum;
569 const struct t4_vpd_hdr *v;
572 * Card information normally starts at VPD_BASE but early cards had
575 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
576 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
578 for (i = 0; i < sizeof(vpd); i += 4) {
579 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
583 v = (const struct t4_vpd_hdr *)vpd;
585 #define FIND_VPD_KW(var,name) do { \
586 var = get_vpd_keyword_val(v , name); \
588 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
593 FIND_VPD_KW(i, "RV");
594 for (csum = 0; i >= 0; i--)
598 CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
601 FIND_VPD_KW(ec, "EC");
602 FIND_VPD_KW(sn, "SN");
605 memcpy(p->id, v->id_data, ID_LEN);
607 memcpy(p->ec, vpd + ec, EC_LEN);
609 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
610 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
631 * sf1_read - read data from the serial flash
632 * @adapter: the adapter
633 * @byte_cnt: number of bytes to read
634 * @cont: whether another operation will be chained
635 * @lock: whether to lock SF for PL access only
636 * @valp: where to store the read data
638 * Reads up to 4 bytes of data from the serial flash. The location of
639 * the read needs to be specified prior to calling this by issuing the
640 * appropriate commands to the serial flash.
642 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
647 if (!byte_cnt || byte_cnt > 4)
649 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
651 t4_write_reg(adapter, A_SF_OP,
652 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
653 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
655 *valp = t4_read_reg(adapter, A_SF_DATA);
660 * sf1_write - write data to the serial flash
661 * @adapter: the adapter
662 * @byte_cnt: number of bytes to write
663 * @cont: whether another operation will be chained
664 * @lock: whether to lock SF for PL access only
665 * @val: value to write
667 * Writes up to 4 bytes of data to the serial flash. The location of
668 * the write needs to be specified prior to calling this by issuing the
669 * appropriate commands to the serial flash.
671 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
674 if (!byte_cnt || byte_cnt > 4)
676 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
678 t4_write_reg(adapter, A_SF_DATA, val);
679 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
680 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
681 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
685 * flash_wait_op - wait for a flash operation to complete
686 * @adapter: the adapter
687 * @attempts: max number of polls of the status register
688 * @delay: delay between polls in ms
690 * Wait for a flash operation to complete by polling the status register.
692 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
698 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
699 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
711 * t4_read_flash - read words from serial flash
712 * @adapter: the adapter
713 * @addr: the start address for the read
714 * @nwords: how many 32-bit words to read
715 * @data: where to store the read data
716 * @byte_oriented: whether to store data as bytes or as words
718 * Read the specified number of 32-bit words from the serial flash.
719 * If @byte_oriented is set the read data is stored as a byte array
720 * (i.e., big-endian), otherwise as 32-bit words in the platform's
723 int t4_read_flash(struct adapter *adapter, unsigned int addr,
724 unsigned int nwords, u32 *data, int byte_oriented)
728 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
731 addr = swab32(addr) | SF_RD_DATA_FAST;
733 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
734 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
737 for ( ; nwords; nwords--, data++) {
738 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
740 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
744 *data = htonl(*data);
750 * t4_write_flash - write up to a page of data to the serial flash
751 * @adapter: the adapter
752 * @addr: the start address to write
753 * @n: length of data to write in bytes
754 * @data: the data to write
755 * @byte_oriented: whether to store data as bytes or as words
757 * Writes up to a page of data (256 bytes) to the serial flash starting
758 * at the given address. All the data must be written to the same page.
759 * If @byte_oriented is set the write data is stored as byte stream
760 * (i.e. matches what on disk), otherwise in big-endian.
762 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
763 unsigned int n, const u8 *data, int byte_oriented)
766 u32 buf[SF_PAGE_SIZE / 4];
767 unsigned int i, c, left, val, offset = addr & 0xff;
769 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
772 val = swab32(addr) | SF_PROG_PAGE;
774 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
775 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
778 for (left = n; left; left -= c) {
780 for (val = 0, i = 0; i < c; ++i)
781 val = (val << 8) + *data++;
786 ret = sf1_write(adapter, c, c != left, 1, val);
790 ret = flash_wait_op(adapter, 8, 1);
794 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
796 /* Read the page to verify the write succeeded */
797 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
802 if (memcmp(data - n, (u8 *)buf + offset, n)) {
803 CH_ERR(adapter, "failed to correctly write the flash page "
810 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
815 * t4_get_fw_version - read the firmware version
816 * @adapter: the adapter
817 * @vers: where to place the version
819 * Reads the FW version from flash.
821 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
823 return t4_read_flash(adapter,
824 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
829 * t4_get_tp_version - read the TP microcode version
830 * @adapter: the adapter
831 * @vers: where to place the version
833 * Reads the TP microcode version from flash.
835 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
837 return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
843 * t4_check_fw_version - check if the FW is compatible with this driver
844 * @adapter: the adapter
846 * Checks if an adapter's FW is compatible with the driver. Returns 0
847 * if there's exact match, a negative error if the version could not be
848 * read or there's a major version mismatch, and a positive value if the
849 * expected major version is found but there's a minor version mismatch.
851 int t4_check_fw_version(struct adapter *adapter)
853 int ret, major, minor, micro;
855 ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
857 ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
861 major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
862 minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
863 micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
865 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
866 CH_ERR(adapter, "card FW has major version %u, driver wants "
867 "%u\n", major, FW_VERSION_MAJOR);
871 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
872 return 0; /* perfect match */
874 /* Minor/micro version mismatch. Report it but often it's OK. */
879 * t4_flash_erase_sectors - erase a range of flash sectors
880 * @adapter: the adapter
881 * @start: the first sector to erase
882 * @end: the last sector to erase
884 * Erases the sectors in the given inclusive range.
886 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
890 while (start <= end) {
891 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
892 (ret = sf1_write(adapter, 4, 0, 1,
893 SF_ERASE_SECTOR | (start << 8))) != 0 ||
894 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
895 CH_ERR(adapter, "erase of flash sector %d failed, "
896 "error %d\n", start, ret);
901 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
906 * t4_flash_cfg_addr - return the address of the flash configuration file
907 * @adapter: the adapter
909 * Return the address within the flash where the Firmware Configuration
912 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
914 if (adapter->params.sf_size == 0x100000)
915 return FLASH_FPGA_CFG_START;
917 return FLASH_CFG_START;
921 * t4_load_cfg - download config file
923 * @cfg_data: the cfg text file to write
924 * @size: text file size
926 * Write the supplied config text file to the card's serial flash.
928 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
932 unsigned int flash_cfg_start_sec;
933 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
935 addr = t4_flash_cfg_addr(adap);
936 flash_cfg_start_sec = addr / SF_SEC_SIZE;
938 if (size > FLASH_CFG_MAX_SIZE) {
939 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
944 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
946 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
947 flash_cfg_start_sec + i - 1);
949 * If size == 0 then we're simply erasing the FLASH sectors associated
950 * with the on-adapter Firmware Configuration File.
952 if (ret || size == 0)
955 /* this will write to the flash up to SF_PAGE_SIZE at a time */
956 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
957 if ( (size - i) < SF_PAGE_SIZE)
961 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
965 addr += SF_PAGE_SIZE;
966 cfg_data += SF_PAGE_SIZE;
971 CH_ERR(adap, "config file %s failed %d\n",
972 (size == 0 ? "clear" : "download"), ret);
978 * t4_load_fw - download firmware
980 * @fw_data: the firmware image to write
983 * Write the supplied firmware image to the card's serial flash.
985 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
990 u8 first_page[SF_PAGE_SIZE];
991 const u32 *p = (const u32 *)fw_data;
992 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
993 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
996 CH_ERR(adap, "FW image has no data\n");
1000 CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1003 if (ntohs(hdr->len512) * 512 != size) {
1004 CH_ERR(adap, "FW image size differs from size in FW header\n");
1007 if (size > FLASH_FW_MAX_SIZE) {
1008 CH_ERR(adap, "FW image too large, max is %u bytes\n",
1013 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1014 csum += ntohl(p[i]);
1016 if (csum != 0xffffffff) {
1017 CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1022 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1023 ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1024 FLASH_FW_START_SEC + i - 1);
1029 * We write the correct version at the end so the driver can see a bad
1030 * version if the FW write fails. Start by writing a copy of the
1031 * first page with a bad version.
1033 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1034 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1035 ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
1039 addr = FLASH_FW_START;
1040 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1041 addr += SF_PAGE_SIZE;
1042 fw_data += SF_PAGE_SIZE;
1043 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1048 ret = t4_write_flash(adap,
1049 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1050 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1053 CH_ERR(adap, "firmware download failed, error %d\n", ret);
1057 /* BIOS boot header */
1058 typedef struct boot_header_s {
1059 u8 signature[2]; /* signature */
1060 u8 length; /* image length (include header) */
1061 u8 offset[4]; /* initialization vector */
1062 u8 reserved[19]; /* reserved */
1063 u8 exheader[2]; /* offset to expansion header */
1067 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1068 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
1069 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
1070 BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
1071 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment */
1075 * t4_load_boot - download boot flash
1076 * @adapter: the adapter
1077 * @boot_data: the boot image to write
1080 * Write the supplied boot image to the card's serial flash.
1081 * The boot image has the following sections: a 28-byte header and the
1084 int t4_load_boot(struct adapter *adap, const u8 *boot_data,
1085 unsigned int boot_addr, unsigned int size)
1089 unsigned int boot_sector = boot_addr * 1024;
1090 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1093 * Perform some primitive sanity testing to avoid accidentally
1094 * writing garbage over the boot sectors. We ought to check for
1095 * more but it's not worth it for now ...
1097 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1098 CH_ERR(adap, "boot image too small/large\n");
1103 * Make sure the boot image does not encroach on the firmware region
1105 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1106 CH_ERR(adap, "boot image encroaching on firmware region\n");
1110 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1111 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1112 (boot_sector >> 16) + i - 1);
1117 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1118 * we finish copying the rest of the boot image. This will ensure
1119 * that the BIOS boot header will only be written if the boot image
1120 * was written in full.
1123 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1124 addr += SF_PAGE_SIZE;
1125 boot_data += SF_PAGE_SIZE;
1126 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1131 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1135 CH_ERR(adap, "boot image download failed, error %d\n", ret);
1140 * t4_read_cimq_cfg - read CIM queue configuration
1141 * @adap: the adapter
1142 * @base: holds the queue base addresses in bytes
1143 * @size: holds the queue sizes in bytes
1144 * @thres: holds the queue full thresholds in bytes
1146 * Returns the current configuration of the CIM queues, starting with
1147 * the IBQs, then the OBQs.
1149 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1153 for (i = 0; i < CIM_NUM_IBQ; i++) {
1154 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1156 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1157 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1158 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1159 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
1161 for (i = 0; i < CIM_NUM_OBQ; i++) {
1162 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1164 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1165 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1166 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1171 * t4_read_cim_ibq - read the contents of a CIM inbound queue
1172 * @adap: the adapter
1173 * @qid: the queue index
1174 * @data: where to store the queue contents
1175 * @n: capacity of @data in 32-bit words
1177 * Reads the contents of the selected CIM queue starting at address 0 up
1178 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1179 * error and the number of 32-bit words actually read on success.
/*
 * Dumps a CIM inbound queue through the IBQ debug interface, one 32-bit
 * word per debug-register handshake.  Rejects qid > 5 and sizes that are
 * not multiples of 4 words.
 * NOTE(review): the elided lines include the F_IBQDBGEN flag of the CFG
 * write, the wait-loop error path, and the return value — confirm against
 * the full source.
 */
1181 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1185 const unsigned int nwords = CIM_IBQ_SIZE * 4;
1187 if (qid > 5 || (n & 3))
/* Each IBQ occupies a fixed nwords-sized window in the debug address space. */
1190 addr = qid * nwords;
1194 for (i = 0; i < n; i++, addr++) {
1195 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
/* Poll until the debug access completes (busy bit clears). */
1197 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1201 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
/* Disable the debug interface when done. */
1203 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1208 * t4_read_cim_obq - read the contents of a CIM outbound queue
1209 * @adap: the adapter
1210 * @qid: the queue index
1211 * @data: where to store the queue contents
1212 * @n: capacity of @data in 32-bit words
1214 * Reads the contents of the selected CIM queue starting at address 0 up
1215 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1216 * error and the number of 32-bit words actually read on success.
/*
 * Dumps a CIM outbound queue through the OBQ debug interface.  Unlike the
 * IBQ variant, the queue's base address and size are looked up from the
 * queue configuration registers rather than being fixed.
 */
1218 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1221 unsigned int addr, v, nwords;
1223 if (qid > 5 || (n & 3))
/* Select the OBQ and read back its base/size configuration. */
1226 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1227 V_QUENUMSELECT(qid));
1228 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1230 addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
1231 nwords = G_CIMQSIZE(v) * 64; /* same */
1235 for (i = 0; i < n; i++, addr++) {
1236 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
/* Poll until the debug access completes (busy bit clears). */
1238 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1242 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
/* Disable the debug interface when done. */
1244 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/*
 * Base offsets of the CIM control and PBT regions within the CIM internal
 * address space; used by t4_cim_ctl_read() and related accessors.
 * NOTE(review): the enclosing "enum {" line is elided in this excerpt.
 */
1250 CIM_CTL_BASE = 0x2000,
1251 CIM_PBT_ADDR_BASE = 0x2800,
1252 CIM_PBT_LRF_BASE = 0x3000,
1253 CIM_PBT_DATA_BASE = 0x3800
1257 * t4_cim_read - read a block from CIM internal address space
1258 * @adap: the adapter
1259 * @addr: the start address within the CIM address space
1260 * @n: number of words to read
1261 * @valp: where to store the result
1263 * Reads a block of 4-byte words from the CIM internal address space.
/*
 * Reads n 4-byte words starting at addr in the CIM internal address space,
 * one HOST_ACC handshake per word.  Fails up front if the access machinery
 * is already busy.
 */
1265 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Bail out if another host access is in flight. */
1270 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
/* Stop early if any word fails; addr advances 4 bytes per word. */
1273 for ( ; !ret && n--; addr += 4) {
1274 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1275 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1278 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1284 * t4_cim_write - write a block into CIM internal address space
1285 * @adap: the adapter
1286 * @addr: the start address within the CIM address space
1287 * @n: number of words to write
1288 * @valp: set of values to write
1290 * Writes a block of 4-byte words into the CIM internal address space.
/*
 * Writes n 4-byte words starting at addr in the CIM internal address space.
 * Mirrors t4_cim_read(): data word first, then the control write with
 * F_HOSTWRITE set, then a busy-bit poll per word.
 */
1292 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1293 const unsigned int *valp)
/* Bail out if another host access is in flight. */
1297 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1300 for ( ; !ret && n--; addr += 4) {
1301 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1302 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1303 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single 4-byte word to the CIM address space. */
1309 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1311 return t4_cim_write(adap, addr, 1, &val);
1315 * t4_cim_ctl_read - read a block from CIM control region
1316 * @adap: the adapter
1317 * @addr: the start address within the CIM control region
1318 * @n: number of words to read
1319 * @valp: where to store the result
1321 * Reads a block of 4-byte words from the CIM control region.
/* Reads from the CIM control region by offsetting into the CIM space. */
1323 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1326 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1330 * t4_cim_read_la - read CIM LA capture buffer
1331 * @adap: the adapter
1332 * @la_buf: where to store the LA data
1333 * @wrptr: the HW write pointer within the capture buffer
1335 * Reads the contents of the CIM LA buffer with the most recent entry at
1336 * the end of the returned data and with the entry at @wrptr first.
1337 * We try to leave the LA in the running state we find it in.
/*
 * Dumps the CIM logic-analyzer buffer.  If the LA is running it is frozen
 * first (config written to 0), entries are read oldest-first starting at
 * the HW write pointer, and the original running state is restored at the
 * end.  Early-return error paths are elided in this excerpt.
 */
1339 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1342 unsigned int cfg, val, idx;
1344 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1348 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
1349 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1354 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Oldest entry lives at the current HW write pointer. */
1358 idx = G_UPDBGLAWRPTR(val);
1362 for (i = 0; i < adap->params.cim_la_size; i++) {
/* Point the read pointer at idx and trigger a read. */
1363 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1364 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN)
1367 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
/* Read-enable still set means the access hasn't completed. */
1370 if (val & F_UPDBGLARDEN) {
1374 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1377 idx = (idx + 1) & M_UPDBGLARDPTR;
/* Restore the LA to its original (running) state if we froze it. */
1380 if (cfg & F_UPDBGLAEN) {
1381 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1382 cfg & ~F_UPDBGLARDEN);
/*
 * Dumps the CIM PIF (processor interface) logic-analyzer buffers for both
 * the outbound (request) and inbound (response) directions.  The LA is
 * paused (F_LADBGEN toggled off) while reading and restored afterwards.
 */
1389 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1390 unsigned int *pif_req_wrptr,
1391 unsigned int *pif_rsp_wrptr)
1394 u32 cfg, val, req, rsp;
1396 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
/* Stop the LA while we read it out. */
1397 if (cfg & F_LADBGEN)
1398 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1400 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1401 req = G_POLADBGWRPTR(val);
1402 rsp = G_PILADBGWRPTR(val);
1404 *pif_req_wrptr = req;
1406 *pif_rsp_wrptr = rsp;
/* Each LA entry spans 6 words; the pointers advance by 2 per entry. */
1408 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1409 for (j = 0; j < 6; j++) {
1410 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1411 V_PILADBGRDPTR(rsp));
1412 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1413 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1417 req = (req + 2) & M_POLADBGRDPTR;
1418 rsp = (rsp + 2) & M_PILADBGRDPTR;
/* Restore the original debug configuration (re-enables LA if it ran). */
1420 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
/*
 * Dumps the CIM MA (memory arbiter) logic-analyzer buffers for the request
 * and response directions, pausing the LA around the reads like
 * t4_cim_read_pif_la().  Each entry spans 5 words.
 */
1423 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1428 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
/* Stop the LA while we read it out. */
1429 if (cfg & F_LADBGEN)
1430 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1432 for (i = 0; i < CIM_MALA_SIZE; i++) {
1433 for (j = 0; j < 5; j++) {
1435 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1436 V_PILADBGRDPTR(idx));
1437 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1438 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
/* Restore the original debug configuration. */
1441 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1445 * t4_tp_read_la - read TP LA capture buffer
1446 * @adap: the adapter
1447 * @la_buf: where to store the LA data
1448 * @wrptr: the HW write pointer within the capture buffer
1450 * Reads the contents of the TP LA buffer with the most recent entry at
1451 * the end of the returned data and with the entry at @wrptr first.
1452 * We leave the LA in the running state we find it in.
/*
 * Dumps the TP logic-analyzer buffer, freezing it during the read and
 * restoring the running state afterwards.  In half-entry LA modes the
 * last entry may be incomplete; it is invalidated with all-ones.
 */
1454 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1456 bool last_incomplete;
1457 unsigned int i, cfg, val, idx;
1459 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1460 if (cfg & F_DBGLAENABLE) /* freeze LA */
1461 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1462 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1464 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1465 idx = G_DBGLAWPTR(val);
/* Modes >= 2 emit half-entries; a clear WHLF bit means the last one is partial. */
1466 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1467 if (last_incomplete)
1468 idx = (idx + 1) & M_DBGLARPTR;
/* Keep everything in val except the read pointer, which we will drive. */
1473 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1474 val |= adap->params.tp.la_mask;
1476 for (i = 0; i < TPLA_SIZE; i++) {
1477 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1478 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1479 idx = (idx + 1) & M_DBGLARPTR;
1482 /* Wipe out last entry if it isn't valid */
1483 if (last_incomplete)
1484 la_buf[TPLA_SIZE - 1] = ~0ULL;
1486 if (cfg & F_DBGLAENABLE) /* restore running state */
1487 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1488 cfg | adap->params.tp.la_mask);
/*
 * Dumps the 8 ULP-RX logic-analyzer streams into la_buf, interleaved so
 * that stream i occupies columns i, i+8, i+16, ... (stride of 8 words).
 */
1491 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1495 for (i = 0; i < 8; i++) {
1496 u32 *p = la_buf + i;
/* Select stream i, then rewind its read pointer to the write pointer. */
1498 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1499 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1500 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1501 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1502 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
/* Link capabilities we are willing to advertise: 100M/1G/10G plus autoneg. */
1506 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1507 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1510 * t4_link_start - apply link configuration to MAC/PHY
1511 * @phy: the PHY to setup
1512 * @mac: the MAC to setup
1513 * @lc: the requested link configuration
1515 * Set up a port's MAC and PHY according to a desired link configuration.
1516 * - If the PHY can auto-negotiate first decide what to advertise, then
1517 * enable/disable auto-negotiation as desired, and reset.
1518 * - If the PHY does not auto-negotiate just reset it.
1519 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1520 * otherwise do it later based on the outcome of auto-negotiation.
/*
 * Builds and sends a FW_PORT_CMD L1 configuration to apply the requested
 * link settings.  Three cases: no autoneg capability (advertise supported
 * speeds), autoneg explicitly disabled (force the requested speed), or
 * autoneg enabled (advertise lc->advertising).
 */
1522 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1523 struct link_config *lc)
1525 struct fw_port_cmd c;
1526 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
/* Translate the requested pause settings into FW capability bits. */
1529 if (lc->requested_fc & PAUSE_RX)
1530 fc |= FW_PORT_CAP_FC_RX;
1531 if (lc->requested_fc & PAUSE_TX)
1532 fc |= FW_PORT_CAP_FC_TX;
1534 memset(&c, 0, sizeof(c));
1535 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1536 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1537 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1540 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
/* PHY can't autonegotiate: advertise everything it supports. */
1541 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1542 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1543 } else if (lc->autoneg == AUTONEG_DISABLE) {
/* Autoneg off: force the requested speed and record the FC now. */
1544 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1545 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg on: FC is resolved later from the negotiation outcome. */
1547 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1549 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1553 * t4_restart_aneg - restart autonegotiation
1554 * @adap: the adapter
1555 * @mbox: mbox to use for the FW command
1556 * @port: the port id
1558 * Restarts autonegotiation for the selected port.
/*
 * Sends an L1 configuration FW_PORT_CMD that advertises only the ANEG
 * capability, which causes the firmware to restart autonegotiation.
 */
1560 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1562 struct fw_port_cmd c;
1564 memset(&c, 0, sizeof(c));
1565 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1566 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1567 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1569 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1570 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/*
 * One row of an interrupt-dispatch table consumed by
 * t4_handle_intr_status().  A zero mask terminates a table.
 * NOTE(review): the enclosing "struct intr_info {" line is elided here.
 */
1574 unsigned int mask; /* bits to check in interrupt status */
1575 const char *msg; /* message to print or NULL */
1576 short stat_idx; /* stat counter to increment or -1 */
1577 unsigned short fatal; /* whether the condition reported is fatal */
1581 * t4_handle_intr_status - table driven interrupt handler
1582 * @adapter: the adapter that generated the interrupt
1583 * @reg: the interrupt status register to process
1584 * @acts: table of interrupt actions
1586 * A table driven interrupt handler that applies a set of masks to an
1587 * interrupt status word and performs the corresponding actions if the
1588 * interrupts described by the mask have occurred. The actions include
1589 * optionally emitting a warning or alert message. The table is terminated
1590 * by an entry specifying mask 0. Returns the number of fatal interrupt
/*
 * Table-driven interrupt dispatcher: reads the cause register, matches
 * each table row's mask against it, alerts on fatal rows and rate-limited
 * warns on the rest, then writes the status back to acknowledge.
 * Returns the number of fatal conditions seen (return statement elided
 * in this excerpt).
 */
1593 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1594 const struct intr_info *acts)
1597 unsigned int mask = 0;
1598 unsigned int status = t4_read_reg(adapter, reg);
/* A zero mask terminates the action table. */
1600 for ( ; acts->mask; ++acts) {
1601 if (!(status & acts->mask))
1605 CH_ALERT(adapter, "%s (0x%x)\n",
1606 acts->msg, status & acts->mask);
1607 } else if (acts->msg)
1608 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1609 acts->msg, status & acts->mask);
1613 if (status) /* clear processed interrupts */
1614 t4_write_reg(adapter, reg, status);
1619 * Interrupt handler for the PCIE module.
/*
 * PCIE module interrupt handler.  Dispatches three cause registers
 * (system-bus agent, PCIe port, and the main PCIE cause) through
 * t4_handle_intr_status(); any fatal condition crashes the adapter via
 * t4_fatal_err().
 */
1621 static void pcie_intr_handler(struct adapter *adapter)
/* UTL system-bus agent status: all entries fatal. */
1623 static struct intr_info sysbus_intr_info[] = {
1624 { F_RNPP, "RXNP array parity error", -1, 1 },
1625 { F_RPCP, "RXPC array parity error", -1, 1 },
1626 { F_RCIP, "RXCIF array parity error", -1, 1 },
1627 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1628 { F_RFTP, "RXFT array parity error", -1, 1 },
/* UTL PCI Express port status: all entries fatal. */
1631 static struct intr_info pcie_port_intr_info[] = {
1632 { F_TPCP, "TXPC array parity error", -1, 1 },
1633 { F_TNPP, "TXNP array parity error", -1, 1 },
1634 { F_TFTP, "TXFT array parity error", -1, 1 },
1635 { F_TCAP, "TXCA array parity error", -1, 1 },
1636 { F_TCIP, "TXCIF array parity error", -1, 1 },
1637 { F_RCAP, "RXCA array parity error", -1, 1 },
1638 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1639 { F_RDPE, "Rx data parity error", -1, 1 },
1640 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
/* Main PCIE interrupt cause: all entries fatal. */
1643 static struct intr_info pcie_intr_info[] = {
1644 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1645 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1646 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1647 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1648 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1649 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1650 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1651 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1652 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1653 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1654 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1655 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1656 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1657 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1658 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1659 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1660 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1661 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1662 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1663 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1664 { F_FIDPERR, "PCI FID parity error", -1, 1 },
1665 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1666 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1667 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1668 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1669 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1670 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1671 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
1672 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
1673 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
/* Sum the fatal counts from all three cause registers. */
1680 fat = t4_handle_intr_status(adapter,
1681 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1683 t4_handle_intr_status(adapter,
1684 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1685 pcie_port_intr_info) +
1686 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1688 t4_fatal_err(adapter);
1692 * TP interrupt handler.
/*
 * TP module interrupt handler; every listed condition is fatal.
 */
1694 static void tp_intr_handler(struct adapter *adapter)
1696 static struct intr_info tp_intr_info[] = {
1697 { 0x3fffffff, "TP parity error", -1, 1 },
1698 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1702 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1703 t4_fatal_err(adapter);
1707 * SGE interrupt handler.
/*
 * SGE module interrupt handler.  Handles the two parity-error cause
 * registers (CAUSE1/CAUSE2) as a single 64-bit value, dispatches CAUSE3
 * through the action table, and decodes the per-queue error register.
 */
1709 static void sge_intr_handler(struct adapter *adapter)
1714 static struct intr_info sge_intr_info[] = {
1715 { F_ERR_CPL_EXCEED_IQE_SIZE,
1716 "SGE received CPL exceeding IQE size", -1, 1 },
1717 { F_ERR_INVALID_CIDX_INC,
1718 "SGE GTS CIDX increment too large", -1, 0 },
1719 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1720 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1721 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1722 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1723 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1725 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1727 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1729 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1731 { F_ERR_ING_CTXT_PRIO,
1732 "SGE too many priority ingress contexts", -1, 0 },
1733 { F_ERR_EGR_CTXT_PRIO,
1734 "SGE too many priority egress contexts", -1, 0 },
1735 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1736 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* CAUSE1/CAUSE2 together form a 64-bit parity-error status. */
1740 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1741 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1743 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
1744 (unsigned long long)v);
/* Acknowledge both halves. */
1745 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1746 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1749 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1751 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1752 if (err & F_ERROR_QID_VALID) {
1753 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1754 if (err & F_UNCAPTURED_ERROR)
1755 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
/* Clear the latched QID and the uncaptured-error flag. */
1756 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1757 F_UNCAPTURED_ERROR);
1761 t4_fatal_err(adapter);
/* Aggregate masks of all CIM OBQ / IBQ parity-error bits. */
1764 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1765 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1766 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1767 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1770 * CIM interrupt handler.
/*
 * CIM module interrupt handler.  Dispatches the host cause register and
 * the uP-access cause register; every listed condition is fatal.
 */
1772 static void cim_intr_handler(struct adapter *adapter)
1774 static struct intr_info cim_intr_info[] = {
1775 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1776 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1777 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1778 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1779 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1780 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1781 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
/* Illegal uP accesses to the various CIM-reachable address spaces. */
1784 static struct intr_info cim_upintr_info[] = {
1785 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1786 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1787 { F_ILLWRINT, "CIM illegal write", -1, 1 },
1788 { F_ILLRDINT, "CIM illegal read", -1, 1 },
1789 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1790 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1791 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1792 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1793 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1794 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1795 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1796 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1797 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1798 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1799 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1800 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1801 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1802 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1803 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1804 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1805 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1806 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1807 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1808 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1809 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1810 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1811 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1812 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* Sum fatal counts from both cause registers. */
1818 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1820 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1823 t4_fatal_err(adapter);
1827 * ULP RX interrupt handler.
/*
 * ULP RX module interrupt handler; every listed condition is fatal.
 */
1829 static void ulprx_intr_handler(struct adapter *adapter)
1831 static struct intr_info ulprx_intr_info[] = {
1832 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1833 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1834 { 0x7fffff, "ULPRX parity error", -1, 1 },
1838 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
1839 t4_fatal_err(adapter);
1843 * ULP TX interrupt handler.
/*
 * ULP TX module interrupt handler; every listed condition is fatal.
 * (The stat_idx/fatal tail of the per-channel rows is on elided lines.)
 */
1845 static void ulptx_intr_handler(struct adapter *adapter)
1847 static struct intr_info ulptx_intr_info[] = {
1848 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1850 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1852 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1854 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1856 { 0xfffffff, "ULPTX parity error", -1, 1 },
1860 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
1861 t4_fatal_err(adapter);
1865 * PM TX interrupt handler.
/*
 * PM TX module interrupt handler; every listed condition is fatal.
 */
1867 static void pmtx_intr_handler(struct adapter *adapter)
1869 static struct intr_info pmtx_intr_info[] = {
1870 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1871 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1872 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1873 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1874 { 0xffffff0, "PMTX framing error", -1, 1 },
1875 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1876 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1878 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1879 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1883 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1884 t4_fatal_err(adapter);
1888 * PM RX interrupt handler.
/*
 * PM RX module interrupt handler; every listed condition is fatal.
 */
1890 static void pmrx_intr_handler(struct adapter *adapter)
1892 static struct intr_info pmrx_intr_info[] = {
1893 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1894 { 0x3ffff0, "PMRX framing error", -1, 1 },
1895 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1896 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
1898 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1899 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1903 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
1904 t4_fatal_err(adapter);
1908 * CPL switch interrupt handler.
/*
 * CPL switch interrupt handler; every listed condition is fatal.
 */
1910 static void cplsw_intr_handler(struct adapter *adapter)
1912 static struct intr_info cplsw_intr_info[] = {
1913 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1914 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1915 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1916 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1917 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1918 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1922 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
1923 t4_fatal_err(adapter);
1927 * LE interrupt handler.
/*
 * LE (lookup engine) interrupt handler.  LIP conditions are non-fatal;
 * parity/command errors are fatal.  The t4_fatal_err() call after the
 * dispatch is on an elided line.
 */
1929 static void le_intr_handler(struct adapter *adap)
1931 static struct intr_info le_intr_info[] = {
1932 { F_LIPMISS, "LE LIP miss", -1, 0 },
1933 { F_LIP0, "LE 0 LIP error", -1, 0 },
1934 { F_PARITYERR, "LE parity error", -1, 1 },
1935 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
1936 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
1940 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
1945 * MPS interrupt handler.
/*
 * MPS module interrupt handler.  Dispatches seven cause registers (Rx
 * parity, Tx, trace, three statistics FIFOs, and classification), clears
 * the top-level MPS cause, and escalates on any fatal count.
 */
1947 static void mps_intr_handler(struct adapter *adapter)
1949 static struct intr_info mps_rx_intr_info[] = {
1950 { 0xffffff, "MPS Rx parity error", -1, 1 },
1953 static struct intr_info mps_tx_intr_info[] = {
1954 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
1955 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1956 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
1958 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
1960 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
1961 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1962 { F_FRMERR, "MPS Tx framing error", -1, 1 },
1965 static struct intr_info mps_trc_intr_info[] = {
1966 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
1967 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
1969 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
1972 static struct intr_info mps_stat_sram_intr_info[] = {
1973 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1976 static struct intr_info mps_stat_tx_intr_info[] = {
1977 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1980 static struct intr_info mps_stat_rx_intr_info[] = {
1981 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1984 static struct intr_info mps_cls_intr_info[] = {
1985 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1986 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1987 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum the fatal counts from all seven cause registers. */
1993 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
1995 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
1997 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
1998 mps_trc_intr_info) +
1999 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2000 mps_stat_sram_intr_info) +
2001 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2002 mps_stat_tx_intr_info) +
2003 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2004 mps_stat_rx_intr_info) +
2005 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
/* Clear the top-level cause and flush the write. */
2008 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2009 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2011 t4_fatal_err(adapter);
/* EDC/MC interrupt bits of interest: parity plus correctable/uncorrectable ECC. */
2014 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2017 * EDC/MC interrupt handler.
/*
 * EDC/MC memory-controller interrupt handler.  idx selects EDC0, EDC1 or
 * MC (indexes into name[]); correctable ECC errors only warn (with the
 * counter cleared), while parity and uncorrectable ECC errors are fatal.
 */
2019 static void mem_intr_handler(struct adapter *adapter, int idx)
2021 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2023 unsigned int addr, cnt_addr, v;
/* Pick the per-controller cause and ECC-status register addresses. */
2025 if (idx <= MEM_EDC1) {
2026 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2027 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2029 addr = A_MC_INT_CAUSE;
2030 cnt_addr = A_MC_ECC_STATUS;
2033 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2034 if (v & F_PERR_INT_CAUSE)
2035 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2036 if (v & F_ECC_CE_INT_CAUSE) {
2037 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
/* Writing all-ones to the count field clears it. */
2039 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2040 CH_WARN_RATELIMIT(adapter,
2041 "%u %s correctable ECC data error%s\n",
2042 cnt, name[idx], cnt > 1 ? "s" : "");
2044 if (v & F_ECC_UE_INT_CAUSE)
2045 CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
/* Acknowledge, then escalate on parity or uncorrectable ECC. */
2048 t4_write_reg(adapter, addr, v);
2049 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2050 t4_fatal_err(adapter);
2054 * MA interrupt handler.
/*
 * MA (memory arbiter) interrupt handler.  Reports parity and address
 * wrap-around errors, acknowledges the cause, and always escalates to
 * t4_fatal_err().
 */
2056 static void ma_intr_handler(struct adapter *adapter)
2058 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2060 if (status & F_MEM_PERR_INT_CAUSE)
2061 CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2062 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2063 if (status & F_MEM_WRAP_INT_CAUSE) {
2064 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
/* Hardware reports the wrapped address in 16-byte units; shift to bytes. */
2065 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2066 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2067 G_MEM_WRAP_ADDRESS(v) << 4);
2069 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2070 t4_fatal_err(adapter);
2074 * SMB interrupt handler.
/*
 * SMBus interrupt handler; all conditions fatal.  The t4_fatal_err()
 * call after the dispatch is on an elided line.
 */
2076 static void smb_intr_handler(struct adapter *adap)
2078 static struct intr_info smb_intr_info[] = {
2079 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2080 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2081 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2085 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2090 * NC-SI interrupt handler.
/*
 * NC-SI interrupt handler; all conditions fatal.  The t4_fatal_err()
 * call after the dispatch is on an elided line.
 */
2092 static void ncsi_intr_handler(struct adapter *adap)
2094 static struct intr_info ncsi_intr_info[] = {
2095 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2096 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2097 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2098 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2102 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2107 * XGMAC interrupt handler.
/*
 * Per-port XGMAC interrupt handler.  Only the Tx/Rx FIFO parity-error
 * bits are of interest; they are reported, acknowledged, and escalated
 * (the t4_fatal_err() call is on an elided line).
 */
2109 static void xgmac_intr_handler(struct adapter *adap, int port)
2111 u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2113 v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2117 if (v & F_TXFIFO_PRTY_ERR)
2118 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2119 if (v & F_RXFIFO_PRTY_ERR)
2120 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
/* Acknowledge only the bits we handled. */
2121 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2126 * PL interrupt handler.
/*
 * PL (top-level) interrupt handler; all conditions fatal.  The
 * t4_fatal_err() call after the dispatch is on an elided line.
 */
2128 static void pl_intr_handler(struct adapter *adap)
2130 static struct intr_info pl_intr_info[] = {
2131 { F_FATALPERR, "T4 fatal parity error", -1, 1 },
2132 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2136 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
/* PF-local interrupt sources, and the global sources this driver masters. */
2140 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2141 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2142 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2143 F_CPL_SWITCH | F_SGE | F_ULP_TX)
2146 * t4_slow_intr_handler - control path interrupt handler
2147 * @adapter: the adapter
2149 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2150 * The designation 'slow' is because it involves register reads, while
2151 * data interrupts typically don't involve any MMIOs.
/*
 * Top-level dispatcher for slow-path (error) interrupts: reads the PL
 * cause register and fans out to the per-module handlers for each set
 * bit.  The `if (cause & F_<MODULE>)` guards for most branches sit on
 * elided lines between the visible calls.  Finishes by acknowledging
 * the global bits and flushing the write.
 */
2153 int t4_slow_intr_handler(struct adapter *adapter)
2155 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
/* Nothing we master is pending. */
2157 if (!(cause & GLBL_INTR_MASK))
2160 cim_intr_handler(adapter);
2162 mps_intr_handler(adapter);
2164 ncsi_intr_handler(adapter);
2166 pl_intr_handler(adapter);
2168 smb_intr_handler(adapter);
2169 if (cause & F_XGMAC0)
2170 xgmac_intr_handler(adapter, 0);
2171 if (cause & F_XGMAC1)
2172 xgmac_intr_handler(adapter, 1);
2173 if (cause & F_XGMAC_KR0)
2174 xgmac_intr_handler(adapter, 2);
2175 if (cause & F_XGMAC_KR1)
2176 xgmac_intr_handler(adapter, 3);
2178 pcie_intr_handler(adapter);
2180 mem_intr_handler(adapter, MEM_MC);
2182 mem_intr_handler(adapter, MEM_EDC0);
2184 mem_intr_handler(adapter, MEM_EDC1);
2186 le_intr_handler(adapter);
2188 tp_intr_handler(adapter);
2190 ma_intr_handler(adapter);
2191 if (cause & F_PM_TX)
2192 pmtx_intr_handler(adapter);
2193 if (cause & F_PM_RX)
2194 pmrx_intr_handler(adapter);
2195 if (cause & F_ULP_RX)
2196 ulprx_intr_handler(adapter);
2197 if (cause & F_CPL_SWITCH)
2198 cplsw_intr_handler(adapter);
2200 sge_intr_handler(adapter);
2201 if (cause & F_ULP_TX)
2202 ulptx_intr_handler(adapter);
2204 /* Clear the interrupts just processed for which we are the master. */
2205 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2206 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2211 * t4_intr_enable - enable interrupts
2212 * @adapter: the adapter whose interrupts should be enabled
2214 * Enable PF-specific interrupts for the calling function and the top-level
2215 * interrupt concentrator for global interrupts. Interrupts are already
2216 * enabled at each module, here we just enable the roots of the interrupt
2219 * Note: this function should be called only when the driver manages
2220 * non PF-specific interrupts from the various HW modules. Only one PCI
2221 * function at a time should be doing this.
/*
 * Enables the PF-local interrupt sources, the SGE error interrupts of
 * interest, and this PF's bit in the global interrupt map.  Per-module
 * interrupt enables are assumed to already be set.
 */
2223 void t4_intr_enable(struct adapter *adapter)
/* Which PF are we?  Determines our bit in A_PL_INT_MAP0. */
2225 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2227 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2228 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2229 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2230 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2231 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2232 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2233 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2235 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2236 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2240 * t4_intr_disable - disable interrupts
2241 * @adapter: the adapter whose interrupts should be disabled
2243 * Disable interrupts. We only disable the top-level interrupt
2244 * concentrators. The caller must be a PCI function managing global
2247 void t4_intr_disable(struct adapter *adapter)
2249 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2251 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2252 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
/*
 * NOTE(review): listing is elided — several cause_reg[] entries, the loop
 * index declaration, and the braces are not visible here.
 */
2256 * t4_intr_clear - clear all interrupts
2257 * @adapter: the adapter whose interrupts should be cleared
2259 * Clears all interrupts. The caller must be a PCI function managing
2260 * global interrupts.
2262 void t4_intr_clear(struct adapter *adapter)
/* Per-module interrupt cause registers to be acknowledged. */
2264 static const unsigned int cause_reg[] = {
2265 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2266 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2267 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2268 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2270 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2271 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2272 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2273 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2275 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2276 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2277 A_MPS_RX_PERR_INT_CAUSE,
2279 MYPF_REG(A_PL_PF_INT_CAUSE),
/* Write all-ones to each cause register to acknowledge everything. */
2286 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2287 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
/* Finally clear the top-level cause and flush the write. */
2289 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2290 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2294 * hash_mac_addr - return the hash value of a MAC address
2295 * @addr: the 48-bit Ethernet MAC address
2297 * Hashes a MAC address according to the hash function used by HW inexact
2298 * (hash) address matching.
2300 static int hash_mac_addr(const u8 *addr)
/* a = upper 24 bits of the MAC, b = lower 24 bits. The folding of a and b
 * into the final hash value is not visible in this elided listing. */
2302 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2303 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
/*
 * NOTE(review): listing is elided — the outer while loop, the queue-ID
 * packing inner loop body, start/n bookkeeping, and the final return are
 * not visible here.  Comments only; code left untouched.
 */
2311 * t4_config_rss_range - configure a portion of the RSS mapping table
2312 * @adapter: the adapter
2313 * @mbox: mbox to use for the FW command
2314 * @viid: virtual interface whose RSS subtable is to be written
2315 * @start: start entry in the table to write
2316 * @n: how many table entries to write
2317 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2318 * @nrspq: number of values in @rspq
2320 * Programs the selected part of the VI's RSS mapping table with the
2321 * provided values. If @nrspq < @n the supplied values are used repeatedly
2322 * until the full table range is populated.
2324 * The caller must ensure the values in @rspq are in the range allowed for
2327 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2328 int start, int n, const u16 *rspq, unsigned int nrspq)
/* rsp walks the caller's queue-ID array; rsp_end marks its end so the
 * walk can wrap when nrspq < n. */
2331 const u16 *rsp = rspq;
2332 const u16 *rsp_end = rspq + nrspq;
2333 struct fw_rss_ind_tbl_cmd cmd;
/* Build the common command header once; per-chunk fields set below. */
2335 memset(&cmd, 0, sizeof(cmd));
2336 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2337 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2338 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2339 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2343 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2344 * Queue Identifiers. These Ingress Queue IDs are packed three to
2345 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2349 int nq = min(n, 32);
2351 __be32 *qp = &cmd.iq0_to_iq2;
2354 * Set up the firmware RSS command header to send the next
2355 * "nq" Ingress Queue IDs to the firmware.
2357 cmd.niqid = htons(nq);
2358 cmd.startidx = htons(start);
2361 * "nq" more done for the start of the next loop.
2367 * While there are still Ingress Queue IDs to stuff into the
2368 * current firmware RSS command, retrieve them from the
2369 * Ingress Queue ID array and insert them into the command.
2373 * Grab up to the next 3 Ingress Queue IDs (wrapping
2374 * around the Ingress Queue ID array if necessary) and
2375 * insert them into the firmware RSS command at the
2376 * current 3-tuple position within the command.
2380 int nqbuf = min(3, nq);
2383 qbuf[0] = qbuf[1] = qbuf[2] = 0;
2384 while (nqbuf && nq_packed < 32) {
2391 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2392 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2393 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2397 * Send this portion of the RSS table update to the firmware;
2398 * bail out on any errors.
2400 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
/*
 * NOTE(review): listing is elided — the second signature line (the flags
 * parameter), the braces, and the else branch for unsupported modes are
 * not visible here.
 */
2409 * t4_config_glbl_rss - configure the global RSS mode
2410 * @adapter: the adapter
2411 * @mbox: mbox to use for the FW command
2412 * @mode: global RSS mode
2413 * @flags: mode-specific flags
2415 * Sets the global RSS mode.
2417 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2420 struct fw_rss_glb_config_cmd c;
/* Zero the whole command; unused fields must be zero for the firmware. */
2422 memset(&c, 0, sizeof(c));
2423 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2424 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2425 c.retval_len16 = htonl(FW_LEN16(c));
/* Only MANUAL and BASICVIRTUAL modes are handled in the visible code. */
2426 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2427 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2428 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2429 c.u.basicvirtual.mode_pkd =
2430 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2431 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2434 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2438 * t4_config_vi_rss - configure per VI RSS settings
2439 * @adapter: the adapter
2440 * @mbox: mbox to use for the FW command
2443 * @defq: id of the default RSS queue for the VI.
2445 * Configures VI-specific RSS properties.
2447 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2448 unsigned int flags, unsigned int defq)
2450 struct fw_rss_vi_config_cmd c;
2452 memset(&c, 0, sizeof(c));
2453 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2454 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2455 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2456 c.retval_len16 = htonl(FW_LEN16(c));
2457 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2458 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2459 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2462 /* Read an RSS table row */
2463 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
/* Select the row to read; the remaining t4_wait_op_done_val() arguments
 * (attempts/delay/valp) are on a continuation line not visible here. */
2465 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2466 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
/*
 * NOTE(review): listing is elided — local declarations, the error check on
 * rd_rss_row()'s return, and the final return are not visible here.
 */
2471 * t4_read_rss - read the contents of the RSS mapping table
2472 * @adapter: the adapter
2473 * @map: holds the contents of the RSS mapping table
2475 * Reads the contents of the RSS hash->queue mapping table.
2477 int t4_read_rss(struct adapter *adapter, u16 *map)
/* Each table row holds two queue entries, hence RSS_NENTRIES / 2 reads. */
2482 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2483 ret = rd_rss_row(adapter, i, &val);
2486 *map++ = G_LKPTBLQUEUE0(val);
2487 *map++ = G_LKPTBLQUEUE1(val);
2493 * t4_read_rss_key - read the global RSS key
2494 * @adap: the adapter
2495 * @key: 10-entry array holding the 320-bit RSS key
2497 * Reads the global 320-bit RSS key.
2499 void t4_read_rss_key(struct adapter *adap, u32 *key)
2501 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2502 A_TP_RSS_SECRET_KEY0);
2506 * t4_write_rss_key - program one of the RSS keys
2507 * @adap: the adapter
2508 * @key: 10-entry array holding the 320-bit RSS key
2509 * @idx: which RSS key to write
2511 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2512 * 0..15 the corresponding entry in the RSS key table is written,
2513 * otherwise the global RSS key is written.
2515 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2517 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2518 A_TP_RSS_SECRET_KEY0);
2519 if (idx >= 0 && idx < 16)
2520 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2521 V_KEYWRADDR(idx) | F_KEYWREN);
2525 * t4_read_rss_pf_config - read PF RSS Configuration Table
2526 * @adapter: the adapter
2527 * @index: the entry in the PF RSS table to read
2528 * @valp: where to store the returned value
2530 * Reads the PF RSS Configuration Table at the specified index and returns
2531 * the value found there.
2533 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2535 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2536 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2540 * t4_write_rss_pf_config - write PF RSS Configuration Table
2541 * @adapter: the adapter
2542 * @index: the entry in the VF RSS table to read
2543 * @val: the value to store
2545 * Writes the PF RSS Configuration Table at the specified index with the
2548 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2550 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2551 &val, 1, A_TP_RSS_PF0_CONFIG + index);
/*
 * NOTE(review): listing is elided — the second signature line (the vfl/vfh
 * output parameters), the vrt declaration, and the braces are not visible.
 */
2555 * t4_read_rss_vf_config - read VF RSS Configuration Table
2556 * @adapter: the adapter
2557 * @index: the entry in the VF RSS table to read
2558 * @vfl: where to store the returned VFL
2559 * @vfh: where to store the returned VFH
2561 * Reads the VF RSS Configuration Table at the specified index and returns
2562 * the (VFL, VFH) values found there.
2564 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2570 * Request that the index'th VF Table values be read into VFL/VFH.
/* Clear stale address/enable bits, then select @index with read enable. */
2572 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2573 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2574 vrt |= V_VFWRADDR(index) | F_VFRDEN;
2575 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2578 * Grab the VFL/VFH values ...
2580 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2581 vfl, 1, A_TP_RSS_VFL_CONFIG);
2582 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2583 vfh, 1, A_TP_RSS_VFH_CONFIG);
/*
 * NOTE(review): listing is elided — the second signature line (the vfl/vfh
 * value parameters), the vrt declaration, and the braces are not visible.
 */
2587 * t4_write_rss_vf_config - write VF RSS Configuration Table
2589 * @adapter: the adapter
2590 * @index: the entry in the VF RSS table to write
2591 * @vfl: the VFL to store
2592 * @vfh: the VFH to store
2594 * Writes the VF RSS Configuration Table at the specified index with the
2595 * specified (VFL, VFH) values.
2597 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2603 * Load up VFL/VFH with the values to be written ...
2605 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2606 &vfl, 1, A_TP_RSS_VFL_CONFIG);
2607 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2608 &vfh, 1, A_TP_RSS_VFH_CONFIG);
2611 * Write the VFL/VFH into the VF Table at index'th location.
/* Clear stale read/address bits, then commit at @index with write enable. */
2613 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2614 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2615 vrt |= V_VFWRADDR(index) | F_VFWREN;
2616 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
/*
 * NOTE(review): listing is elided — the pfmap local declaration and the
 * return statement are not visible here.
 */
2620 * t4_read_rss_pf_map - read PF RSS Map
2621 * @adapter: the adapter
2623 * Reads the PF RSS Map register and returns its value.
2625 u32 t4_read_rss_pf_map(struct adapter *adapter)
2629 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2630 &pfmap, 1, A_TP_RSS_PF_MAP);
2635 * t4_write_rss_pf_map - write PF RSS Map
2636 * @adapter: the adapter
2637 * @pfmap: PF RSS Map value
2639 * Writes the specified value to the PF RSS Map register.
2641 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2643 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2644 &pfmap, 1, A_TP_RSS_PF_MAP);
/*
 * NOTE(review): listing is elided — the pfmask local declaration and the
 * return statement are not visible here.
 */
2648 * t4_read_rss_pf_mask - read PF RSS Mask
2649 * @adapter: the adapter
2651 * Reads the PF RSS Mask register and returns its value.
2653 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2657 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2658 &pfmask, 1, A_TP_RSS_PF_MSK);
2663 * t4_write_rss_pf_mask - write PF RSS Mask
2664 * @adapter: the adapter
2665 * @pfmask: PF RSS Mask value
2667 * Writes the specified value to the PF RSS Mask register.
2669 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2671 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2672 &pfmask, 1, A_TP_RSS_PF_MSK);
/*
 * NOTE(review): listing is elided — the nbits declaration/accumulation,
 * the error and success returns, and the final t4_write_indirect()
 * destination argument are not visible here.
 */
2676 * t4_set_filter_mode - configure the optional components of filter tuples
2677 * @adap: the adapter
2678 * @mode_map: a bitmap selecting which optional filter components to enable
2680 * Sets the filter mode by selecting the optional components to enable
2681 * in filter tuples. Returns 0 on success and a negative error if the
2682 * requested mode needs more bits than are available for optional
2685 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
/* Bit widths of the optional tuple fields, indexed by field position. */
2687 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
/* Total up the widths of every requested component. */
2691 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2692 if (mode_map & (1 << i))
2694 if (nbits > FILTER_OPT_LEN)
2696 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
/*
 * NOTE(review): listing is elided — per the doc comment @v4/@v6 may be
 * NULL, but the if (v4)/if (v6) guards, braces, and #undef lines are not
 * visible here.
 */
2702 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2703 * @adap: the adapter
2704 * @v4: holds the TCP/IP counter values
2705 * @v6: holds the TCP/IPv6 counter values
2707 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2708 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2710 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2711 struct tp_tcp_stats *v6)
/* One buffer large enough for the OUT_RST..RXT_SEG_LO counter run. */
2713 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
/* STAT maps a counter name to its slot in val[]; STAT64 joins HI/LO. */
2715 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2716 #define STAT(x) val[STAT_IDX(x)]
2717 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
/* IPv4 counter run starts at OUT_RST. */
2720 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2721 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2722 v4->tcpOutRsts = STAT(OUT_RST);
2723 v4->tcpInSegs = STAT64(IN_SEG);
2724 v4->tcpOutSegs = STAT64(OUT_SEG);
2725 v4->tcpRetransSegs = STAT64(RXT_SEG);
/* IPv6 counter run starts at V6OUT_RST; same layout, reuse val[]. */
2728 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2729 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2730 v6->tcpOutRsts = STAT(OUT_RST);
2731 v6->tcpInSegs = STAT64(IN_SEG);
2732 v6->tcpOutSegs = STAT64(OUT_SEG);
2733 v6->tcpRetransSegs = STAT64(RXT_SEG);
2741 * t4_tp_get_err_stats - read TP's error MIB counters
2742 * @adap: the adapter
2743 * @st: holds the counter values
2745 * Returns the values of TP's error counters.
2747 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2749 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2750 12, A_TP_MIB_MAC_IN_ERR_0);
2751 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2752 8, A_TP_MIB_TNL_CNG_DROP_0);
2753 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2754 4, A_TP_MIB_TNL_DROP_0);
2755 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2756 4, A_TP_MIB_OFD_VLN_DROP_0);
2757 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2758 4, A_TP_MIB_TCP_V6IN_ERR_0);
2759 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2760 2, A_TP_MIB_OFD_ARP_DROP);
2764 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
2765 * @adap: the adapter
2766 * @st: holds the counter values
2768 * Returns the values of TP's proxy counters.
2770 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2772 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2773 4, A_TP_MIB_TNL_LPBK_0);
2777 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
2778 * @adap: the adapter
2779 * @st: holds the counter values
2781 * Returns the values of TP's CPL counters.
2783 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2785 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2786 8, A_TP_MIB_CPL_IN_REQ_0);
2790 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2791 * @adap: the adapter
2792 * @st: holds the counter values
2794 * Returns the values of TP's RDMA counters.
2796 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2798 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2799 2, A_TP_MIB_RQE_DFR_MOD);
/*
 * NOTE(review): listing is elided — the val[] local declaration and the
 * braces are not visible here.
 */
2803 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2804 * @adap: the adapter
2805 * @idx: the port index
2806 * @st: holds the counter values
2808 * Returns the values of TP's FCoE counters for the selected port.
2810 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
2811 struct tp_fcoe_stats *st)
2815 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
2816 1, A_TP_MIB_FCOE_DDP_0 + idx);
2817 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
2818 1, A_TP_MIB_FCOE_DROP_0 + idx);
/* Byte counter is split across two MIB words (HI then LO). */
2819 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2820 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
2821 st->octetsDDP = ((u64)val[0] << 32) | val[1];
/*
 * NOTE(review): listing is elided — the val[] declaration, the starting
 * MIB index argument of the read, and the st->drops assignment (val[1])
 * are not visible here.
 */
2825 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2826 * @adap: the adapter
2827 * @st: holds the counter values
2829 * Returns the values of TP's counters for non-TCP directly-placed packets.
2831 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2835 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2837 st->frames = val[0];
/* 64-bit octet count is assembled from two 32-bit MIB words. */
2839 st->octets = ((u64)val[2] << 32) | val[3];
/*
 * NOTE(review): listing is elided — local declarations, the NULL guard
 * around the mtu_log store (doc says @mtu_log may be %NULL), and the
 * closing braces are not visible here.
 */
2843 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2844 * @adap: the adapter
2845 * @mtus: where to store the MTU values
2846 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2848 * Reads the HW path MTU table.
2850 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2855 for (i = 0; i < NMTUS; ++i) {
/* Select entry i for read-back, then fetch value and width fields. */
2856 t4_write_reg(adap, A_TP_MTU_TABLE,
2857 V_MTUINDEX(0xff) | V_MTUVALUE(i));
2858 v = t4_read_reg(adap, A_TP_MTU_TABLE);
2859 mtus[i] = G_MTUVALUE(v);
2861 mtu_log[i] = G_MTUWIDTH(v);
2866 * t4_read_cong_tbl - reads the congestion control table
2867 * @adap: the adapter
2868 * @incr: where to store the alpha values
2870 * Reads the additive increments programmed into the HW congestion
2873 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2875 unsigned int mtu, w;
2877 for (mtu = 0; mtu < NMTUS; ++mtu)
2878 for (w = 0; w < NCCTRL_WIN; ++w) {
2879 t4_write_reg(adap, A_TP_CCTRL_TABLE,
2880 V_ROWINDEX(0xffff) | (mtu << 5) | w);
2881 incr[mtu][w] = (u16)t4_read_reg(adap,
2882 A_TP_CCTRL_TABLE) & 0x1fff;
/*
 * NOTE(review): listing is elided — the declarations of i and v and the
 * closing braces are not visible here.
 */
2887 * t4_read_pace_tbl - read the pace table
2888 * @adap: the adapter
2889 * @pace_vals: holds the returned values
2891 * Returns the values of TP's pace table in microseconds.
2893 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
2897 for (i = 0; i < NTX_SCHED; i++) {
/* Select entry i for read-back, then convert ticks to microseconds. */
2898 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2899 v = t4_read_reg(adap, A_TP_PACE_TABLE);
2900 pace_vals[i] = dack_ticks_to_usec(adap, v);
2905 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2906 * @adap: the adapter
2907 * @addr: the indirect TP register address
2908 * @mask: specifies the field within the register to modify
2909 * @val: new value for the field
2911 * Sets a field of an indirect TP register to the given value.
2913 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2914 unsigned int mask, unsigned int val)
2916 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
2917 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2918 t4_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * NOTE(review): listing is heavily elided — the assignments for a[9] and
 * up, b[9]..b[12], and the braces are not visible here.
 */
2922 * init_cong_ctrl - initialize congestion control parameters
2923 * @a: the alpha values for congestion control
2924 * @b: the beta values for congestion control
2926 * Initialize the congestion control parameters.
2928 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
/* Alpha table: the first nine windows all get increment 1. */
2930 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
/* Beta table: shift amounts grow with the window index. */
2955 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2958 b[13] = b[14] = b[15] = b[16] = 3;
2959 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2960 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2965 /* The minimum additive increment value for the congestion control table */
2966 #define CC_MIN_INCR 2U
/*
 * NOTE(review): listing is elided — the loop index declarations, the
 * log2 adjustment branch body, the inc declaration and its clamp to
 * CC_MIN_INCR, and the closing braces are not visible here.
 */
2969 * t4_load_mtus - write the MTU and congestion control HW tables
2970 * @adap: the adapter
2971 * @mtus: the values for the MTU table
2972 * @alpha: the values for the congestion control alpha parameter
2973 * @beta: the values for the congestion control beta parameter
2975 * Write the HW MTU table with the supplied MTUs and the high-speed
2976 * congestion control table with the supplied alpha, beta, and MTUs.
2977 * We write the two tables together because the additive increments
2978 * depend on the MTUs.
2980 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2981 const unsigned short *alpha, const unsigned short *beta)
/* Expected average packet size for each congestion control window. */
2983 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2984 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2985 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2986 28672, 40960, 57344, 81920, 114688, 163840, 229376
2991 for (i = 0; i < NMTUS; ++i) {
2992 unsigned int mtu = mtus[i];
2993 unsigned int log2 = fls(mtu);
2995 if (!(mtu & ((1 << log2) >> 2))) /* round */
/* Program MTU table entry i with its value and base-2 width. */
2997 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
2998 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
/* Derive the per-window additive increment from this MTU. */
3000 for (w = 0; w < NCCTRL_WIN; ++w) {
3003 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3006 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3007 (w << 16) | (beta[w] << 13) | inc);
/*
 * NOTE(review): listing is elided — the range check on start/n, the error
 * returns inside the conversion loop, and the final return are not
 * visible here.
 */
3013 * t4_set_pace_tbl - set the pace table
3014 * @adap: the adapter
3015 * @pace_vals: the pace values in microseconds
3016 * @start: index of the first entry in the HW pace table to set
3017 * @n: how many entries to set
3019 * Sets (a subset of the) HW pace table.
3021 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3022 unsigned int start, unsigned int n)
3024 unsigned int vals[NTX_SCHED], i;
/* Duration of one dack tick in nanoseconds, used for the conversion. */
3025 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3030 /* convert values from us to dack ticks, rounding to closest value */
3031 for (i = 0; i < n; i++, pace_vals++) {
3032 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3033 if (vals[i] > 0x7ff)
3035 if (*pace_vals && vals[i] == 0)
/* Commit the converted values into the HW pace table. */
3038 for (i = 0; i < n; i++, start++)
3039 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
/*
 * NOTE(review): listing is heavily elided — the tps computation, the
 * achieved-rate computation, the best-match bookkeeping, the loop exit,
 * the sched parity test selecting the high/low halfword, and the return
 * are not visible here.
 */
3044 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3045 * @adap: the adapter
3046 * @kbps: target rate in Kbps
3047 * @sched: the scheduler index
3049 * Configure a Tx HW scheduler for the target rate.
3051 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3053 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3054 unsigned int clk = adap->params.vpd.cclk * 1000;
3055 unsigned int selected_cpt = 0, selected_bpt = 0;
3058 kbps *= 125; /* -> bytes */
/* Search (cpt, bpt) pairs for the closest achievable byte rate. */
3059 for (cpt = 1; cpt <= 255; cpt++) {
3061 bpt = (kbps + tps / 2) / tps;
3062 if (bpt > 0 && bpt <= 255) {
3064 delta = v >= kbps ? v - kbps : kbps - v;
3065 if (delta < mindelta) {
3070 } else if (selected_cpt)
/* Program the selected pair into the shared rate-limit register;
 * two schedulers share one register, hence sched / 2. */
3076 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3077 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3078 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3080 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3082 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3083 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * NOTE(review): listing is elided — the error return for an out-of-range
 * ipg, the if/else on (sched & 1) selecting which halfword to update,
 * and the final return are not visible here.
 */
3088 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3089 * @adap: the adapter
3090 * @sched: the scheduler index
3091 * @ipg: the interpacket delay in tenths of nanoseconds
3093 * Set the interpacket delay for a HW packet rate scheduler.
3095 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
/* Two schedulers share one timer-separator register, hence sched / 2. */
3097 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3099 /* convert ipg to nearest number of core clocks */
3100 ipg *= core_ticks_per_usec(adap);
3101 ipg = (ipg + 5000) / 10000;
3102 if (ipg > M_TXTIMERSEPQ0)
/* Read-modify-write the register, keeping the other queue's field. */
3105 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3106 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3108 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3110 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3111 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3112 t4_read_reg(adap, A_TP_TM_PIO_DATA);
/*
 * NOTE(review): listing is elided — the second signature line (the ipg
 * output parameter), the cpt extraction, the sched-parity halfword
 * selection branches, and the braces are not visible here.
 */
3117 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3118 * @adap: the adapter
3119 * @sched: the scheduler index
3120 * @kbps: the byte rate in Kbps
3121 * @ipg: the interpacket delay in tenths of nanoseconds
3123 * Return the current configuration of a HW Tx scheduler.
3125 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3128 unsigned int v, addr, bpt, cpt;
/* Two schedulers share each register, hence sched / 2. */
3131 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3132 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3133 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3136 bpt = (v >> 8) & 0xff;
3139 *kbps = 0; /* scheduler disabled */
3141 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3142 *kbps = (v * bpt) / 125;
/* Second register holds the interpacket-gap timer separator. */
3146 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3147 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3148 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3152 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3157 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3158 * clocks. The formula is
3160 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3162 * which is equivalent to
3164 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3166 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3168 u64 v = bytes256 * adap->params.vpd.cclk;
3170 return v * 62 + v / 2;
/*
 * NOTE(review): listing is elided — the local declaration of v and the
 * braces are not visible here.
 */
3174 * t4_get_chan_txrate - get the current per channel Tx rates
3175 * @adap: the adapter
3176 * @nic_rate: rates for NIC traffic
3177 * @ofld_rate: rates for offloaded traffic
3179 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3182 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* NIC (tunnel) rates: one field per channel in TP_TX_TRATE. */
3186 v = t4_read_reg(adap, A_TP_TX_TRATE);
3187 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3188 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3189 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3190 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
/* Offload rates: one field per channel in TP_TX_ORATE. */
3192 v = t4_read_reg(adap, A_TP_TX_ORATE);
3193 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3194 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3195 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3196 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
/*
 * NOTE(review): listing is heavily elided — the enable parameter on the
 * signature, the early-return disable path's return, several condition
 * continuations, the wait loop around TRCFIFOEMPTY, and the final return
 * are not visible here.
 */
3200 * t4_set_trace_filter - configure one of the tracing filters
3201 * @adap: the adapter
3202 * @tp: the desired trace filter parameters
3203 * @idx: which filter to configure
3204 * @enable: whether to enable or disable the filter
3206 * Configures one of the tracing filters available in HW. If @enable is
3207 * %0 @tp is not examined and may be %NULL.
3209 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3212 int i, ofst = idx * 4;
3213 u32 data_reg, mask_reg, cfg;
3214 u32 multitrc = F_TRCMULTIFILTER;
/* Disable path: clearing the match control register stops the tracer. */
3217 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
/* Validate all requested filter parameters against HW field limits. */
3221 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3222 tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
3223 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
3226 if (tp->snap_len > 256) { /* must be tracer 0 */
3227 if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
3228 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
3229 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
3231 return -EINVAL; /* other tracers are enabled */
3234 i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
3235 if (G_TFCAPTUREMAX(i) > 256 &&
3236 (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
3240 /* stop the tracer we'll be changing */
3241 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3243 /* disable tracing globally if running in the wrong single/multi mode */
3244 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3245 if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
3246 t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
3247 t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
3249 if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
3253 * At this point either the tracing is enabled and in the right mode or
/* Compute this filter's match/don't-care register banks. */
3257 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3258 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3259 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
/* HW stores don't-care bits, so the mask is written inverted. */
3261 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3262 t4_write_reg(adap, data_reg, tp->data[i]);
3263 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3265 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3266 V_TFCAPTUREMAX(tp->snap_len) |
3267 V_TFMINPKTSIZE(tp->min_len));
3268 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3269 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3270 V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3272 cfg &= ~F_TRCMULTIFILTER;
3273 t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
3274 out: t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
/*
 * NOTE(review): listing is elided — the signature continuation (the
 * enabled output parameter), the ctla/ctlb declarations, and the closing
 * braces are not visible here.
 */
3279 * t4_get_trace_filter - query one of the tracing filters
3280 * @adap: the adapter
3281 * @tp: the current trace filter parameters
3282 * @idx: which trace filter to query
3283 * @enabled: non-zero if the filter is enabled
3285 * Returns the current settings of one of the HW tracing filters.
3287 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3291 int i, ofst = idx * 4;
3292 u32 data_reg, mask_reg;
/* Fetch both match-control words for this filter. */
3294 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3295 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3297 *enabled = !!(ctla & F_TFEN);
3298 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3299 tp->min_len = G_TFMINPKTSIZE(ctlb);
3300 tp->skip_ofst = G_TFOFFSET(ctla);
3301 tp->skip_len = G_TFLENGTH(ctla);
3302 tp->invert = !!(ctla & F_TFINVERTMATCH);
3303 tp->port = G_TFPORT(ctla);
/* Walk this filter's match/don't-care banks; HW stores don't-care bits,
 * so the mask is read back inverted. */
3305 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3306 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3307 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3309 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3310 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3311 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
/*
 * NOTE(review): listing is elided — the loop index declaration and the
 * closing braces are not visible here.
 */
3316 * t4_pmtx_get_stats - returns the HW stats from PMTX
3317 * @adap: the adapter
3318 * @cnt: where to store the count statistics
3319 * @cycles: where to store the cycle statistics
3321 * Returns performance statistics from PMTX.
3323 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3327 for (i = 0; i < PM_NSTATS; i++) {
/* Select stat i (1-based in HW), then read its count and cycles. */
3328 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3329 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3330 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
/*
 * NOTE(review): listing is elided — the loop index declaration and the
 * closing braces are not visible here.
 */
3335 * t4_pmrx_get_stats - returns the HW stats from PMRX
3336 * @adap: the adapter
3337 * @cnt: where to store the count statistics
3338 * @cycles: where to store the cycle statistics
3340 * Returns performance statistics from PMRX.
3342 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3346 for (i = 0; i < PM_NSTATS; i++) {
/* Select stat i (1-based in HW), then read its count and cycles. */
3347 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3348 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3349 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
/*
 * NOTE(review): listing is elided — the if conditions guarding the two
 * visible returns (selecting on the port count n) and the fall-through
 * return are not visible here.
 */
3354 * get_mps_bg_map - return the buffer groups associated with a port
3355 * @adap: the adapter
3356 * @idx: the port index
3358 * Returns a bitmap indicating which MPS buffer groups are associated
3359 * with the given port. Bit i is set if buffer group i is used by the
3362 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
/* n encodes how many ports the MPS is configured for. */
3364 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL))
3367 return idx == 0 ? 0xf : 0;
3369 return idx < 2 ? (3 << (2 * idx)) : 0;
3374 * t4_get_port_stats - collect port statistics
3375 * @adap: the adapter
3376 * @idx: the port index
3377 * @p: the stats structure to fill
3379 * Collect statistics related to the given port from HW.
3381 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3383 u32 bgmap = get_mps_bg_map(adap, idx);
3385 #define GET_STAT(name) \
3386 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3387 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3389 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3390 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3391 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3392 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3393 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3394 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3395 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3396 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3397 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3398 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3399 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3400 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3401 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3402 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3403 p->tx_drop = GET_STAT(TX_PORT_DROP);
3404 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3405 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3406 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3407 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3408 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3409 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3410 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3411 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3413 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3414 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3415 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3416 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3417 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3418 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3419 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3420 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3421 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3422 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3423 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3424 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3425 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3426 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3427 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3428 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3429 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3430 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3431 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3432 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3433 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3434 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3435 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3436 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3437 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3438 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3439 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3441 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3442 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3443 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3444 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3445 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3446 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3447 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3448 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3455 * t4_clr_port_stats - clear port statistics
3456 * @adap: the adapter
3457 * @idx: the port index
3459 * Clear HW statistics for the given port.
3461 void t4_clr_port_stats(struct adapter *adap, int idx)
3464 u32 bgmap = get_mps_bg_map(adap, idx);
3466 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3467 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3468 t4_write_reg(adap, PORT_REG(idx, i), 0);
3469 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3470 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3471 t4_write_reg(adap, PORT_REG(idx, i), 0);
3472 for (i = 0; i < 4; i++)
3473 if (bgmap & (1 << i)) {
3475 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3477 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3482 * t4_get_lb_stats - collect loopback port statistics
3483 * @adap: the adapter
3484 * @idx: the loopback port index
3485 * @p: the stats structure to fill
3487 * Return HW statistics for the given loopback port.
3489 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3491 u32 bgmap = get_mps_bg_map(adap, idx);
3493 #define GET_STAT(name) \
3494 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3495 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3497 p->octets = GET_STAT(BYTES);
3498 p->frames = GET_STAT(FRAMES);
3499 p->bcast_frames = GET_STAT(BCAST);
3500 p->mcast_frames = GET_STAT(MCAST);
3501 p->ucast_frames = GET_STAT(UCAST);
3502 p->error_frames = GET_STAT(ERROR);
3504 p->frames_64 = GET_STAT(64B);
3505 p->frames_65_127 = GET_STAT(65B_127B);
3506 p->frames_128_255 = GET_STAT(128B_255B);
3507 p->frames_256_511 = GET_STAT(256B_511B);
3508 p->frames_512_1023 = GET_STAT(512B_1023B);
3509 p->frames_1024_1518 = GET_STAT(1024B_1518B);
3510 p->frames_1519_max = GET_STAT(1519B_MAX);
3511 p->drop = t4_read_reg(adap, PORT_REG(idx,
3512 A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3514 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3515 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3516 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3517 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3518 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3519 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3520 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3521 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3528 * t4_wol_magic_enable - enable/disable magic packet WoL
3529 * @adap: the adapter
3530 * @port: the physical port index
3531 * @addr: MAC address expected in magic packets, %NULL to disable
3533 * Enables/disables magic packet wake-on-LAN for the selected port.
3535 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3539 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3540 (addr[2] << 24) | (addr[3] << 16) |
3541 (addr[4] << 8) | addr[5]);
3542 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3543 (addr[0] << 8) | addr[1]);
3545 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3546 V_MAGICEN(addr != NULL));
3550 * t4_wol_pat_enable - enable/disable pattern-based WoL
3551 * @adap: the adapter
3552 * @port: the physical port index
3553 * @map: bitmap of which HW pattern filters to set
3554 * @mask0: byte mask for bytes 0-63 of a packet
3555 * @mask1: byte mask for bytes 64-127 of a packet
3556 * @crc: Ethernet CRC for selected bytes
3557 * @enable: enable/disable switch
3559 * Sets the pattern filters indicated in @map to mask out the bytes
3560 * specified in @mask0/@mask1 in received packets and compare the CRC of
3561 * the resulting packet against @crc. If @enable is %true pattern-based
3562 * WoL is enabled, otherwise disabled.
3564 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3565 u64 mask0, u64 mask1, unsigned int crc, bool enable)
3570 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3577 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3579 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3580 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3581 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3583 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3587 /* write byte masks */
3588 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3589 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3590 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3591 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3595 t4_write_reg(adap, EPIO_REG(DATA0), crc);
3596 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3597 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3598 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3603 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3608 * t4_mk_filtdelwr - create a delete filter WR
3609 * @ftid: the filter ID
3610 * @wr: the filter work request to populate
3611 * @qid: ingress queue to receive the delete notification
3613 * Creates a filter work request to delete the supplied filter. If @qid is
3614 * negative the delete notification is suppressed.
3616 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3618 memset(wr, 0, sizeof(*wr));
3619 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3620 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3621 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3622 V_FW_FILTER_WR_NOREPLY(qid < 0));
3623 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3625 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * Initialize the common header of a FW command: opcode, REQUEST flag,
 * READ/WRITE direction, and the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
3635 * t4_mdio_rd - read a PHY register through MDIO
3636 * @adap: the adapter
3637 * @mbox: mailbox to use for the FW command
3638 * @phy_addr: the PHY address
3639 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3640 * @reg: the register to read
3641 * @valp: where to store the value
3643 * Issues a FW command through the given mailbox to read a PHY register.
3645 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3646 unsigned int mmd, unsigned int reg, unsigned int *valp)
3649 struct fw_ldst_cmd c;
3651 memset(&c, 0, sizeof(c));
3652 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3653 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3654 c.cycles_to_len16 = htonl(FW_LEN16(c));
3655 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3656 V_FW_LDST_CMD_MMD(mmd));
3657 c.u.mdio.raddr = htons(reg);
3659 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3661 *valp = ntohs(c.u.mdio.rval);
3666 * t4_mdio_wr - write a PHY register through MDIO
3667 * @adap: the adapter
3668 * @mbox: mailbox to use for the FW command
3669 * @phy_addr: the PHY address
3670 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3671 * @reg: the register to write
3672 * @valp: value to write
3674 * Issues a FW command through the given mailbox to write a PHY register.
3676 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3677 unsigned int mmd, unsigned int reg, unsigned int val)
3679 struct fw_ldst_cmd c;
3681 memset(&c, 0, sizeof(c));
3682 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3683 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3684 c.cycles_to_len16 = htonl(FW_LEN16(c));
3685 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3686 V_FW_LDST_CMD_MMD(mmd));
3687 c.u.mdio.raddr = htons(reg);
3688 c.u.mdio.rval = htons(val);
3690 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3694 * t4_sge_ctxt_rd - read an SGE context through FW
3695 * @adap: the adapter
3696 * @mbox: mailbox to use for the FW command
3697 * @cid: the context id
3698 * @ctype: the context type
3699 * @data: where to store the context data
3701 * Issues a FW command through the given mailbox to read an SGE context.
3703 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3704 enum ctxt_type ctype, u32 *data)
3707 struct fw_ldst_cmd c;
3709 if (ctype == CTXT_EGRESS)
3710 ret = FW_LDST_ADDRSPC_SGE_EGRC;
3711 else if (ctype == CTXT_INGRESS)
3712 ret = FW_LDST_ADDRSPC_SGE_INGC;
3713 else if (ctype == CTXT_FLM)
3714 ret = FW_LDST_ADDRSPC_SGE_FLMC;
3716 ret = FW_LDST_ADDRSPC_SGE_CONMC;
3718 memset(&c, 0, sizeof(c));
3719 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3720 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3721 c.cycles_to_len16 = htonl(FW_LEN16(c));
3722 c.u.idctxt.physid = htonl(cid);
3724 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3726 data[0] = ntohl(c.u.idctxt.ctxt_data0);
3727 data[1] = ntohl(c.u.idctxt.ctxt_data1);
3728 data[2] = ntohl(c.u.idctxt.ctxt_data2);
3729 data[3] = ntohl(c.u.idctxt.ctxt_data3);
3730 data[4] = ntohl(c.u.idctxt.ctxt_data4);
3731 data[5] = ntohl(c.u.idctxt.ctxt_data5);
3737 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3738 * @adap: the adapter
3739 * @cid: the context id
3740 * @ctype: the context type
3741 * @data: where to store the context data
3743 * Reads an SGE context directly, bypassing FW. This is only for
3744 * debugging when FW is unavailable.
3746 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3751 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3752 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3754 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3755 *data++ = t4_read_reg(adap, i);
3760 * t4_fw_hello - establish communication with FW
3761 * @adap: the adapter
3762 * @mbox: mailbox to use for the FW command
3763 * @evt_mbox: mailbox to receive async FW events
3764 * @master: specifies the caller's willingness to be the device master
3765 * @state: returns the current device state
3767 * Issues a command to establish communication with FW.
3769 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3770 enum dev_master master, enum dev_state *state)
3773 struct fw_hello_cmd c;
3775 unsigned int master_mbox;
3776 int retries = FW_CMD_HELLO_RETRIES;
3779 memset(&c, 0, sizeof(c));
3780 INIT_CMD(c, HELLO, WRITE);
3781 c.err_to_clearinit = htonl(
3782 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3783 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3784 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3785 M_FW_HELLO_CMD_MBMASTER) |
3786 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3787 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3788 F_FW_HELLO_CMD_CLEARINIT);
3791 * Issue the HELLO command to the firmware. If it's not successful
3792 * but indicates that we got a "busy" or "timeout" condition, retry
3793 * the HELLO until we exhaust our retry limit.
3795 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3796 if (ret != FW_SUCCESS) {
3797 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3802 v = ntohl(c.err_to_clearinit);
3803 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3805 if (v & F_FW_HELLO_CMD_ERR)
3806 *state = DEV_STATE_ERR;
3807 else if (v & F_FW_HELLO_CMD_INIT)
3808 *state = DEV_STATE_INIT;
3810 *state = DEV_STATE_UNINIT;
3814 * If we're not the Master PF then we need to wait around for the
3815 * Master PF Driver to finish setting up the adapter.
3817 * Note that we also do this wait if we're a non-Master-capable PF and
3818 * there is no current Master PF; a Master PF may show up momentarily
3819 * and we wouldn't want to fail pointlessly. (This can happen when an
3820 * OS loads lots of different drivers rapidly at the same time). In
3821 * this case, the Master PF returned by the firmware will be
3822 * M_PCIE_FW_MASTER so the test below will work ...
3824 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
3825 master_mbox != mbox) {
3826 int waiting = FW_CMD_HELLO_TIMEOUT;
3829 * Wait for the firmware to either indicate an error or
3830 * initialized state. If we see either of these we bail out
3831 * and report the issue to the caller. If we exhaust the
3832 * "hello timeout" and we haven't exhausted our retries, try
3833 * again. Otherwise bail with a timeout error.
3842 * If neither Error nor Initialialized are indicated
3843 * by the firmware keep waiting till we exhaust our
3844 * timeout ... and then retry if we haven't exhausted
3847 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3848 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
3859 * We either have an Error or Initialized condition
3860 * report errors preferentially.
3863 if (pcie_fw & F_PCIE_FW_ERR)
3864 *state = DEV_STATE_ERR;
3865 else if (pcie_fw & F_PCIE_FW_INIT)
3866 *state = DEV_STATE_INIT;
3870 * If we arrived before a Master PF was selected and
3871 * there's not a valid Master PF, grab its identity
3874 if (master_mbox == M_PCIE_FW_MASTER &&
3875 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3876 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3885 * t4_fw_bye - end communication with FW
3886 * @adap: the adapter
3887 * @mbox: mailbox to use for the FW command
3889 * Issues a command to terminate communication with FW.
3891 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3893 struct fw_bye_cmd c;
3895 memset(&c, 0, sizeof(c));
3896 INIT_CMD(c, BYE, WRITE);
3897 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3901 * t4_fw_reset - issue a reset to FW
3902 * @adap: the adapter
3903 * @mbox: mailbox to use for the FW command
3904 * @reset: specifies the type of reset to perform
3906 * Issues a reset command of the specified type to FW.
3908 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3910 struct fw_reset_cmd c;
3912 memset(&c, 0, sizeof(c));
3913 INIT_CMD(c, RESET, WRITE);
3914 c.val = htonl(reset);
3915 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3919 * t4_fw_initialize - ask FW to initialize the device
3920 * @adap: the adapter
3921 * @mbox: mailbox to use for the FW command
3923 * Issues a command to FW to partially initialize the device. This
3924 * performs initialization that generally doesn't depend on user input.
3926 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3928 struct fw_initialize_cmd c;
3930 memset(&c, 0, sizeof(c));
3931 INIT_CMD(c, INITIALIZE, WRITE);
3932 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3936 * t4_query_params - query FW or device parameters
3937 * @adap: the adapter
3938 * @mbox: mailbox to use for the FW command
3941 * @nparams: the number of parameters
3942 * @params: the parameter names
3943 * @val: the parameter values
3945 * Reads the value of FW or device parameters. Up to 7 parameters can be
3948 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3949 unsigned int vf, unsigned int nparams, const u32 *params,
3953 struct fw_params_cmd c;
3954 __be32 *p = &c.param[0].mnem;
3959 memset(&c, 0, sizeof(c));
3960 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
3961 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
3962 V_FW_PARAMS_CMD_VFN(vf));
3963 c.retval_len16 = htonl(FW_LEN16(c));
3965 for (i = 0; i < nparams; i++, p += 2)
3966 *p = htonl(*params++);
3968 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3970 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3976 * t4_set_params - sets FW or device parameters
3977 * @adap: the adapter
3978 * @mbox: mailbox to use for the FW command
3981 * @nparams: the number of parameters
3982 * @params: the parameter names
3983 * @val: the parameter values
3985 * Sets the value of FW or device parameters. Up to 7 parameters can be
3986 * specified at once.
3988 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3989 unsigned int vf, unsigned int nparams, const u32 *params,
3992 struct fw_params_cmd c;
3993 __be32 *p = &c.param[0].mnem;
3998 memset(&c, 0, sizeof(c));
3999 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4000 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4001 V_FW_PARAMS_CMD_VFN(vf));
4002 c.retval_len16 = htonl(FW_LEN16(c));
4005 *p++ = htonl(*params++);
4006 *p++ = htonl(*val++);
4009 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4013 * t4_cfg_pfvf - configure PF/VF resource limits
4014 * @adap: the adapter
4015 * @mbox: mailbox to use for the FW command
4016 * @pf: the PF being configured
4017 * @vf: the VF being configured
4018 * @txq: the max number of egress queues
4019 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4020 * @rxqi: the max number of interrupt-capable ingress queues
4021 * @rxq: the max number of interruptless ingress queues
4022 * @tc: the PCI traffic class
4023 * @vi: the max number of virtual interfaces
4024 * @cmask: the channel access rights mask for the PF/VF
4025 * @pmask: the port access rights mask for the PF/VF
4026 * @nexact: the maximum number of exact MPS filters
4027 * @rcaps: read capabilities
4028 * @wxcaps: write/execute capabilities
4030 * Configures resource limits and capabilities for a physical or virtual
4033 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4034 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4035 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4036 unsigned int vi, unsigned int cmask, unsigned int pmask,
4037 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4039 struct fw_pfvf_cmd c;
4041 memset(&c, 0, sizeof(c));
4042 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4043 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4044 V_FW_PFVF_CMD_VFN(vf));
4045 c.retval_len16 = htonl(FW_LEN16(c));
4046 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4047 V_FW_PFVF_CMD_NIQ(rxq));
4048 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4049 V_FW_PFVF_CMD_PMASK(pmask) |
4050 V_FW_PFVF_CMD_NEQ(txq));
4051 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4052 V_FW_PFVF_CMD_NEXACTF(nexact));
4053 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4054 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4055 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4056 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4060 * t4_alloc_vi - allocate a virtual interface
4061 * @adap: the adapter
4062 * @mbox: mailbox to use for the FW command
4063 * @port: physical port associated with the VI
4064 * @pf: the PF owning the VI
4065 * @vf: the VF owning the VI
4066 * @nmac: number of MAC addresses needed (1 to 5)
4067 * @mac: the MAC addresses of the VI
4068 * @rss_size: size of RSS table slice associated with this VI
4070 * Allocates a virtual interface for the given physical port. If @mac is
4071 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4072 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4073 * stored consecutively so the space needed is @nmac * 6 bytes.
4074 * Returns a negative error number or the non-negative VI id.
4076 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4077 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4078 unsigned int *rss_size)
4083 memset(&c, 0, sizeof(c));
4084 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4085 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4086 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4087 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4088 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4091 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4096 memcpy(mac, c.mac, sizeof(c.mac));
4099 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4101 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4103 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4105 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4109 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
4110 return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4114 * t4_free_vi - free a virtual interface
4115 * @adap: the adapter
4116 * @mbox: mailbox to use for the FW command
4117 * @pf: the PF owning the VI
4118 * @vf: the VF owning the VI
4119 * @viid: virtual interface identifiler
4121 * Free a previously allocated virtual interface.
4123 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4124 unsigned int vf, unsigned int viid)
4128 memset(&c, 0, sizeof(c));
4129 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4132 V_FW_VI_CMD_PFN(pf) |
4133 V_FW_VI_CMD_VFN(vf));
4134 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4135 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4137 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4141 * t4_set_rxmode - set Rx properties of a virtual interface
4142 * @adap: the adapter
4143 * @mbox: mailbox to use for the FW command
4145 * @mtu: the new MTU or -1
4146 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4147 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4148 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4149 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4150 * @sleep_ok: if true we may sleep while awaiting command completion
4152 * Sets Rx properties of a virtual interface.
4154 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4155 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4158 struct fw_vi_rxmode_cmd c;
4160 /* convert to FW values */
4162 mtu = M_FW_VI_RXMODE_CMD_MTU;
4164 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4166 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4168 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4170 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4172 memset(&c, 0, sizeof(c));
4173 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4174 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4175 c.retval_len16 = htonl(FW_LEN16(c));
4176 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4177 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4178 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4179 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4180 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4181 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4185 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4186 * @adap: the adapter
4187 * @mbox: mailbox to use for the FW command
4189 * @free: if true any existing filters for this VI id are first removed
4190 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4191 * @addr: the MAC address(es)
4192 * @idx: where to store the index of each allocated filter
4193 * @hash: pointer to hash address filter bitmap
4194 * @sleep_ok: call is allowed to sleep
4196 * Allocates an exact-match filter for each of the supplied addresses and
4197 * sets it to the corresponding address. If @idx is not %NULL it should
4198 * have at least @naddr entries, each of which will be set to the index of
4199 * the filter allocated for the corresponding MAC address. If a filter
4200 * could not be allocated for an address its index is set to 0xffff.
4201 * If @hash is not %NULL addresses that fail to allocate an exact filter
4202 * are hashed and update the hash filter bitmap pointed at by @hash.
4204 * Returns a negative error number or the number of filters allocated.
4206 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4207 unsigned int viid, bool free, unsigned int naddr,
4208 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4210 int offset, ret = 0;
4211 struct fw_vi_mac_cmd c;
4212 unsigned int nfilters = 0;
4213 unsigned int rem = naddr;
4215 if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
4218 for (offset = 0; offset < naddr ; /**/) {
4219 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4221 : ARRAY_SIZE(c.u.exact));
4222 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4223 u.exact[fw_naddr]), 16);
4224 struct fw_vi_mac_exact *p;
4227 memset(&c, 0, sizeof(c));
4228 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4231 V_FW_CMD_EXEC(free) |
4232 V_FW_VI_MAC_CMD_VIID(viid));
4233 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4234 V_FW_CMD_LEN16(len16));
4236 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4237 p->valid_to_idx = htons(
4238 F_FW_VI_MAC_CMD_VALID |
4239 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4240 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4244 * It's okay if we run out of space in our MAC address arena.
4245 * Some of the addresses we submit may get stored so we need
4246 * to run through the reply to see what the results were ...
4248 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4249 if (ret && ret != -FW_ENOMEM)
4252 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4253 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4256 idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
4259 if (index < FW_CLS_TCAM_NUM_ENTRIES)
4262 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4270 if (ret == 0 || ret == -FW_ENOMEM)
4276 * t4_change_mac - modifies the exact-match filter for a MAC address
4277 * @adap: the adapter
4278 * @mbox: mailbox to use for the FW command
4280 * @idx: index of existing filter for old value of MAC address, or -1
4281 * @addr: the new MAC address value
4282 * @persist: whether a new MAC allocation should be persistent
4283 * @add_smt: if true also add the address to the HW SMT
4285 * Modifies an exact-match filter and sets it to the new MAC address if
4286 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4287 * latter case the address is added persistently if @persist is %true.
4289 * Note that in general it is not possible to modify the value of a given
4290 * filter so the generic way to modify an address filter is to free the one
4291 * being used by the old address value and allocate a new filter for the
4292 * new address value.
4294 * Returns a negative error number or the index of the filter with the new
4295 * MAC value. Note that this index may differ from @idx.
4297 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4298 int idx, const u8 *addr, bool persist, bool add_smt)
4301 struct fw_vi_mac_cmd c;
4302 struct fw_vi_mac_exact *p = c.u.exact;
4304 if (idx < 0) /* new allocation */
4305 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4306 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4308 memset(&c, 0, sizeof(c));
4309 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4310 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4311 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4312 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4313 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4314 V_FW_VI_MAC_CMD_IDX(idx));
4315 memcpy(p->macaddr, addr, sizeof(p->macaddr));
4317 ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4319 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4320 if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
4327 * t4_set_addr_hash - program the MAC inexact-match hash filter
4328 * @adap: the adapter
4329 * @mbox: mailbox to use for the FW command
4331 * @ucast: whether the hash filter should also match unicast addresses
4332 * @vec: the value to be written to the hash filter
4333 * @sleep_ok: call is allowed to sleep
4335 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4337 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4338 bool ucast, u64 vec, bool sleep_ok)
4340 struct fw_vi_mac_cmd c;
4342 memset(&c, 0, sizeof(c));
4343 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4344 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4345 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4346 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4348 c.u.hash.hashvec = cpu_to_be64(vec);
4349 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4353 * t4_enable_vi - enable/disable a virtual interface
4354 * @adap: the adapter
4355 * @mbox: mailbox to use for the FW command
4357 * @rx_en: 1=enable Rx, 0=disable Rx
4358 * @tx_en: 1=enable Tx, 0=disable Tx
4360 * Enables/disables a virtual interface.
4362 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4363 bool rx_en, bool tx_en)
4365 struct fw_vi_enable_cmd c;
4367 memset(&c, 0, sizeof(c));
4368 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4369 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4370 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4371 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4372 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4376 * t4_identify_port - identify a VI's port by blinking its LED
4377 * @adap: the adapter
4378 * @mbox: mailbox to use for the FW command
4380 * @nblinks: how many times to blink LED at 2.5 Hz
4382 * Identifies a VI's port by blinking its LED.
4384 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4385 unsigned int nblinks)
4387 struct fw_vi_enable_cmd c;
4389 memset(&c, 0, sizeof(c));
4390 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4391 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4392 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4393 c.blinkdur = htons(nblinks);
4394 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4398 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4399 * @adap: the adapter
4400 * @mbox: mailbox to use for the FW command
4401 * @start: %true to enable the queues, %false to disable them
4402 * @pf: the PF owning the queues
4403 * @vf: the VF owning the queues
4404 * @iqid: ingress queue id
4405 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4406 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4408 * Starts or stops an ingress queue and its associated FLs, if any.
4410 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4411 unsigned int pf, unsigned int vf, unsigned int iqid,
4412 unsigned int fl0id, unsigned int fl1id)
4416 memset(&c, 0, sizeof(c));
4417 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4418 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4419 V_FW_IQ_CMD_VFN(vf));
4420 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4421 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4422 c.iqid = htons(iqid);
4423 c.fl0id = htons(fl0id);
4424 c.fl1id = htons(fl1id);
4425 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4429 * t4_iq_free - free an ingress queue and its FLs
4430 * @adap: the adapter
4431 * @mbox: mailbox to use for the FW command
4432 * @pf: the PF owning the queues
4433 * @vf: the VF owning the queues
4434 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4435 * @iqid: ingress queue id
4436 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4437 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4439 * Frees an ingress queue and its associated FLs, if any.
4441 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4442 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4443 unsigned int fl0id, unsigned int fl1id)
4447 memset(&c, 0, sizeof(c));
4448 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4449 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4450 V_FW_IQ_CMD_VFN(vf));
4451 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4452 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4453 c.iqid = htons(iqid);
4454 c.fl0id = htons(fl0id);
4455 c.fl1id = htons(fl1id);
4456 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4460 * t4_eth_eq_free - free an Ethernet egress queue
4461 * @adap: the adapter
4462 * @mbox: mailbox to use for the FW command
4463 * @pf: the PF owning the queue
4464 * @vf: the VF owning the queue
4465 * @eqid: egress queue id
4467 * Frees an Ethernet egress queue.
4469 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4470 unsigned int vf, unsigned int eqid)
4472 struct fw_eq_eth_cmd c;
4474 memset(&c, 0, sizeof(c));
4475 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4476 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4477 V_FW_EQ_ETH_CMD_VFN(vf));
4478 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4479 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4480 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4484 * t4_ctrl_eq_free - free a control egress queue
4485 * @adap: the adapter
4486 * @mbox: mailbox to use for the FW command
4487 * @pf: the PF owning the queue
4488 * @vf: the VF owning the queue
4489 * @eqid: egress queue id
4491 * Frees a control egress queue.
4493 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4494 unsigned int vf, unsigned int eqid)
4496 struct fw_eq_ctrl_cmd c;
4498 memset(&c, 0, sizeof(c));
4499 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4500 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4501 V_FW_EQ_CTRL_CMD_VFN(vf));
4502 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4503 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4504 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4508 * t4_ofld_eq_free - free an offload egress queue
4509 * @adap: the adapter
4510 * @mbox: mailbox to use for the FW command
4511 * @pf: the PF owning the queue
4512 * @vf: the VF owning the queue
4513 * @eqid: egress queue id
4515 * Frees a control egress queue.
4517 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4518 unsigned int vf, unsigned int eqid)
4520 struct fw_eq_ofld_cmd c;
4522 memset(&c, 0, sizeof(c));
4523 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4524 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
4525 V_FW_EQ_OFLD_CMD_VFN(vf));
4526 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
4527 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
4528 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4532 * t4_handle_fw_rpl - process a FW reply message
4533 * @adap: the adapter
4534 * @rpl: start of the FW message
4536 * Processes a FW message, such as link state change messages.
4538 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4540 u8 opcode = *(const u8 *)rpl;
4542 if (opcode == FW_PORT_CMD) { /* link/module state change message */
4543 int speed = 0, fc = 0, i;
4544 const struct fw_port_cmd *p = (const void *)rpl;
4545 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
4546 struct port_info *pi = NULL;
4547 struct link_config *lc;
4548 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
4549 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
4550 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
4552 if (stat & F_FW_PORT_CMD_RXPAUSE)
4554 if (stat & F_FW_PORT_CMD_TXPAUSE)
4556 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4558 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4560 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4561 speed = SPEED_10000;
4563 for_each_port(adap, i) {
4564 pi = adap2pinfo(adap, i);
4565 if (pi->tx_chan == chan)
4570 if (link_ok != lc->link_ok || speed != lc->speed ||
4571 fc != lc->fc) { /* something changed */
4572 lc->link_ok = link_ok;
4575 t4_os_link_changed(adap, i, link_ok);
4577 if (mod != pi->mod_type) {
4579 t4_os_portmod_changed(adap, i);
4586 * get_pci_mode - determine a card's PCI mode
4587 * @adapter: the adapter
4588 * @p: where to store the PCI settings
4590 * Determines a card's PCI mode and associated parameters, such as speed
4593 static void __devinit get_pci_mode(struct adapter *adapter,
4594 struct pci_params *p)
4599 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4601 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
4602 p->speed = val & PCI_EXP_LNKSTA_CLS;
4603 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
4608 * init_link_config - initialize a link's SW state
4609 * @lc: structure holding the link state
4610 * @caps: link capabilities
4612 * Initializes the SW state maintained for each link, including the link's
4613 * capabilities and default speed/flow-control/autonegotiation settings.
4615 static void __devinit init_link_config(struct link_config *lc,
4618 lc->supported = caps;
4619 lc->requested_speed = 0;
4621 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4622 if (lc->supported & FW_PORT_CAP_ANEG) {
4623 lc->advertising = lc->supported & ADVERT_MASK;
4624 lc->autoneg = AUTONEG_ENABLE;
4625 lc->requested_fc |= PAUSE_AUTONEG;
4627 lc->advertising = 0;
4628 lc->autoneg = AUTONEG_DISABLE;
4632 static int __devinit wait_dev_ready(struct adapter *adap)
4636 whoami = t4_read_reg(adap, A_PL_WHOAMI);
4638 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4642 whoami = t4_read_reg(adap, A_PL_WHOAMI);
4643 return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
4647 static int __devinit get_flash_params(struct adapter *adapter)
4652 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4654 ret = sf1_read(adapter, 3, 0, 1, &info);
4655 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4659 if ((info & 0xff) != 0x20) /* not a Numonix flash */
4661 info >>= 16; /* log2 of size */
4662 if (info >= 0x14 && info < 0x18)
4663 adapter->params.sf_nsec = 1 << (info - 16);
4664 else if (info == 0x18)
4665 adapter->params.sf_nsec = 64;
4668 adapter->params.sf_size = 1 << info;
4672 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
4678 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4680 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4683 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4688 * t4_prep_adapter - prepare SW and HW for operation
4689 * @adapter: the adapter
4690 * @reset: if true perform a HW reset
4692 * Initialize adapter SW state for the various HW modules, set initial
4693 * values for some adapter tunables, take PHYs out of reset, and
4694 * initialize the MDIO interface.
4696 int __devinit t4_prep_adapter(struct adapter *adapter)
4700 ret = wait_dev_ready(adapter);
4704 get_pci_mode(adapter, &adapter->params.pci);
4706 adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
4707 adapter->params.pci.vpd_cap_addr =
4708 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4710 ret = get_flash_params(adapter);
4714 ret = get_vpd_params(adapter, &adapter->params.vpd);
4718 if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) {
4719 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
4721 adapter->params.cim_la_size = CIMLA_SIZE;
4724 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4727 * Default port and clock for debugging in case we can't reach FW.
4729 adapter->params.nports = 1;
4730 adapter->params.portvec = 1;
4731 adapter->params.vpd.cclk = 50000;
4733 /* Set pci completion timeout value to 4 seconds. */
4734 set_pcie_completion_timeout(adapter, 0xd);
4738 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
4742 struct fw_port_cmd c;
4743 unsigned int rss_size;
4744 adapter_t *adap = p->adapter;
4746 memset(&c, 0, sizeof(c));
4748 for (i = 0, j = -1; i <= p->port_id; i++) {
4751 } while ((adap->params.portvec & (1 << j)) == 0);
4754 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
4755 F_FW_CMD_REQUEST | F_FW_CMD_READ |
4756 V_FW_PORT_CMD_PORTID(j));
4757 c.action_to_len16 = htonl(
4758 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4760 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4764 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4771 p->rss_size = rss_size;
4772 t4_os_set_hw_addr(adap, p->port_id, addr);
4774 ret = ntohl(c.u.info.lstatus_to_modtype);
4775 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
4776 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
4777 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
4778 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
4780 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));