2 * Copyright (c) 2012 Chelsio Communications, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "t4_regs_values.h"
35 #include "firmware/t4fw_interface.h"
38 #define msleep(x) pause("t4hw", (x) * hz / 1000)
41 * t4_wait_op_done_val - wait until an operation is completed
42 * @adapter: the adapter performing the operation
43 * @reg: the register to check for completion
44 * @mask: a single-bit field within @reg that indicates completion
45 * @polarity: the value of the field when the operation is completed
46 * @attempts: number of check iterations
47 * @delay: delay in usecs between iterations
48 * @valp: where to store the value of the register at completion time
50 * Wait until an operation is completed by checking a bit in a register
51 * up to @attempts times. If @valp is not NULL the value of the register
52 * at the time it indicated completion is stored there. Returns 0 if the
53 * operation completes and -EAGAIN otherwise.
55 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 int polarity, int attempts, int delay, u32 *valp)
59 u32 val = t4_read_reg(adapter, reg);
/* Done when the masked bit matches the requested polarity (bit collapsed to 0/1 via !!). */
61 if (!!(val & mask) == polarity) {
74 * t4_set_reg_field - set a register field to a value
75 * @adapter: the adapter to program
76 * @addr: the register address
77 * @mask: specifies the portion of the register to modify
78 * @val: the new value for the register field
80 * Sets a register field specified by the supplied mask to the
83 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the field bits, then OR in the new value. */
86 u32 v = t4_read_reg(adapter, addr) & ~mask;
88 t4_write_reg(adapter, addr, v | val);
/* Read back to push the posted write out to the device before returning. */
89 (void) t4_read_reg(adapter, addr); /* flush */
93 * t4_read_indirect - read indirectly addressed registers
95 * @addr_reg: register holding the indirect address
96 * @data_reg: register holding the value of the indirect register
97 * @vals: where the read register values are stored
98 * @nregs: how many indirect registers to read
99 * @start_idx: index of first indirect register to read
101 * Reads registers that are accessed indirectly through an address/data
104 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
105 unsigned int data_reg, u32 *vals, unsigned int nregs,
106 unsigned int start_idx)
/* Select the indirect register via the address register, then read its value. */
109 t4_write_reg(adap, addr_reg, start_idx);
110 *vals++ = t4_read_reg(adap, data_reg);
116 * t4_write_indirect - write indirectly addressed registers
118 * @addr_reg: register holding the indirect addresses
119 * @data_reg: register holding the value for the indirect registers
120 * @vals: values to write
121 * @nregs: how many indirect registers to write
122 * @start_idx: address of first indirect register to write
124 * Writes a sequential block of registers that are accessed indirectly
125 * through an address/data register pair.
127 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
128 unsigned int data_reg, const u32 *vals,
129 unsigned int nregs, unsigned int start_idx)
/* Program the address first, then write the datum; index advances sequentially. */
132 t4_write_reg(adap, addr_reg, start_idx++);
133 t4_write_reg(adap, data_reg, *vals++);
138 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
139 * mechanism. This guarantees that we get the real value even if we're
140 * operating within a Virtual Machine and the Hypervisor is trapping our
141 * Configuration Space accesses.
143 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
/* Issue the config-space request for this PF via the chip, bypassing the host's
 * (possibly virtualized) config access path, then fetch the returned dword. */
145 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
146 F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
148 return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
152 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
/* Copies @nflit 64-bit flits from the mailbox registers at @mbox_addr,
 * converting each to big-endian before storing into the reply buffer. */
154 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
157 for ( ; nflit; nflit--, mbox_addr += 8)
158 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
162 * Handle a FW assertion reported in a mailbox.
164 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
166 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log the assert's
 * source location and values. Fields are big-endian, hence the ntohl()s. */
168 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
169 CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
170 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
171 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
174 #define X_CIM_PF_NOACCESS 0xeeeeeeee  /* value read back when the PF has no mailbox access */
176 * t4_wr_mbox_meat - send a command to FW through the given mailbox
178 * @mbox: index of the mailbox to use
179 * @cmd: the command to write
180 * @size: command length in bytes
181 * @rpl: where to optionally store the reply
182 * @sleep_ok: if true we may sleep while awaiting command completion
184 * Sends the given command to FW through the selected mailbox and waits
185 * for the FW to execute the command. If @rpl is not %NULL it is used to
186 * store the FW's reply to the command. The command and its optional
187 * reply are of the same length. Some FW commands like RESET and
188 * INITIALIZE can take a considerable amount of time to execute.
189 * @sleep_ok determines whether we may sleep while awaiting the response.
190 * If sleeping is allowed we use progressive backoff otherwise we spin.
192 * The return value is 0 on success or a negative errno on failure. A
193 * failure can happen either because we are not able to execute the
194 * command or FW executes it but signals an error. In the latter case
195 * the return value is the error code indicated by FW (negated).
197 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
198 void *rpl, bool sleep_ok)
201 * We delay in small increments at first in an effort to maintain
202 * responsiveness for simple, fast executing commands but then back
203 * off to larger delays to a maximum retry delay.
205 static const int delay[] = {
206 1, 1, 3, 5, 10, 10, 20, 50, 100
211 int i, ms, delay_idx;
212 const __be64 *p = cmd;
213 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
214 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
/* Commands must be a non-zero multiple of 16 bytes, up to the mailbox size. */
216 if ((size & 15) || size > MBOX_LEN)
/* Try to claim mailbox ownership for the PL; retry a few times if unowned. */
219 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
220 for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
221 v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
223 if (v != X_MBOWNER_PL)
224 return v ? -EBUSY : -ETIMEDOUT;
/* Copy the command into the mailbox data registers, 64 bits at a time. */
226 for (i = 0; i < size; i += 8, p++)
227 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
/* Hand the mailbox to FW and flush the doorbell write. */
229 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
230 t4_read_reg(adap, ctl_reg); /* flush write */
/* Poll for the reply with progressive backoff up to the overall timeout. */
235 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
237 ms = delay[delay_idx]; /* last element may repeat */
238 if (delay_idx < ARRAY_SIZE(delay) - 1)
244 v = t4_read_reg(adap, ctl_reg);
245 if (v == X_CIM_PF_NOACCESS)
247 if (G_MBOWNER(v) == X_MBOWNER_PL) {
248 if (!(v & F_MBMSGVALID)) {
/* Ownership returned without a valid message: release and keep waiting. */
249 t4_write_reg(adap, ctl_reg,
250 V_MBOWNER(X_MBOWNER_NONE))
/* A FW_DEBUG_CMD in the reply slot is an async FW assertion, not our reply. */
254 res = t4_read_reg64(adap, data_reg);
255 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
256 fw_asrt(adap, data_reg);
257 res = V_FW_CMD_RETVAL(EIO);
259 get_mbox_rpl(adap, rpl, size / 8, data_reg);
260 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
/* Return FW's status negated, per the driver's errno convention. */
261 return -G_FW_CMD_RETVAL((int)res);
265 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
266 *(const u8 *)cmd, mbox);
271 * t4_mc_read - read from MC through backdoor accesses
273 * @addr: address of first byte requested
274 * @data: 64 bytes of data containing the requested address
275 * @ecc: where to store the corresponding 64-bit ECC word
277 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
278 * that covers the requested address @addr. If @parity is not %NULL it
279 * is assigned the 64-bit ECC word for the read data.
281 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
/* Bail if a BIST operation is already in flight. */
285 if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
/* Program a 64-byte BIST read at the aligned address and kick it off. */
287 t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
288 t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
289 t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
290 t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
292 i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
296 #define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
/* Copy out the 16 data words (note: iterated high-to-low) and optional ECC. */
298 for (i = 15; i >= 0; i--)
299 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
301 *ecc = t4_read_reg64(adap, MC_DATA(16));
307 * t4_edc_read - read from EDC through backdoor accesses
309 * @idx: which EDC to access
310 * @addr: address of first byte requested
311 * @data: 64 bytes of data containing the requested address
312 * @ecc: where to store the corresponding 64-bit ECC word
314 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
315 * that covers the requested address @addr. If @parity is not %NULL it
316 * is assigned the 64-bit ECC word for the read data.
318 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
/* NOTE(review): @idx is used as a register-address offset here; presumably it
 * is scaled to the EDC instance's register stride earlier — not visible. */
323 if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
325 t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
326 t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
327 t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
328 t4_write_reg(adap, A_EDC_BIST_CMD + idx,
329 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
330 i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
334 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
/* Copy out the 16 data words (high-to-low) and the optional ECC word. */
336 for (i = 15; i >= 0; i--)
337 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
339 *ecc = t4_read_reg64(adap, EDC_DATA(16));
345 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
347 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
348 * @addr: address within indicated memory type
349 * @len: amount of memory to read
350 * @buf: host memory buffer
352 * Reads an [almost] arbitrary memory region in the firmware: the
353 * firmware memory address, length and host buffer must be aligned on
354 * 32-bit boudaries. The memory is returned as a raw byte sequence from
355 * the firmware's memory. If this memory contains data structures which
356 * contain multi-byte integers, it's the callers responsibility to
357 * perform appropriate byte order conversions.
359 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
362 u32 pos, start, end, offset;
366 * Argument sanity checks ...
368 if ((addr & 0x3) || (len & 0x3))
372 * The underlaying EDC/MC read routines read 64 bytes at a time so we
373 * need to round down the start and round up the end. We'll start
374 * copying out of the first line at (addr - start) a word at a time.
376 start = addr & ~(64-1);
377 end = (addr + len + 64-1) & ~(64-1);
378 offset = (addr - start)/sizeof(__be32);
/* Walk the region one 64-byte line at a time; only the first line may
 * start at a non-zero word offset. */
380 for (pos = start; pos < end; pos += 64, offset = 0) {
384 * Read the chip's memory block and bail if there's an error.
387 ret = t4_mc_read(adap, pos, data, NULL);
389 ret = t4_edc_read(adap, mtype, pos, data, NULL);
394 * Copy the data into the caller's memory buffer.
396 while (offset < 16 && len > 0) {
397 *buf++ = data[offset++];
398 len -= sizeof(__be32);
406 * Partial EEPROM Vital Product Data structure. Includes only the ID and
418 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
420 #define EEPROM_MAX_RD_POLL 40  /* max polls of PCI_VPD_ADDR for a read */
421 #define EEPROM_MAX_WR_POLL 6  /* max polls of PCI_VPD_ADDR for a write */
422 #define EEPROM_STAT_ADDR 0x7bfc  /* virtual address of the EEPROM status/WP word */
423 #define VPD_BASE 0x400  /* normal start of VPD data in the EEPROM */
424 #define VPD_BASE_OLD 0  /* VPD start on early cards */
426 #define VPD_INFO_FLD_HDR_SIZE 3  /* 2-byte keyword + 1-byte length */
429 * t4_seeprom_read - read a serial EEPROM location
430 * @adapter: adapter to read
431 * @addr: EEPROM virtual address
432 * @data: where to store the read data
434 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
435 * VPD capability. Note that this function must be called with a virtual
438 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
441 int attempts = EEPROM_MAX_RD_POLL;
442 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* Address must be dword-aligned and within the EEPROM's virtual window. */
444 if (addr >= EEPROMVSIZE || (addr & 3))
/* Start the VPD read, then poll until the capability sets the F flag. */
447 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
450 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
451 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
453 if (!(val & PCI_VPD_ADDR_F)) {
454 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
/* VPD data register is little-endian; convert to host order. */
457 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
458 *data = le32_to_cpu(*data);
463 * t4_seeprom_write - write a serial EEPROM location
464 * @adapter: adapter to write
465 * @addr: virtual EEPROM address
466 * @data: value to write
468 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
469 * VPD capability. Note that this function must be called with a virtual
472 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
475 int attempts = EEPROM_MAX_WR_POLL;
476 unsigned int base = adapter->params.pci.vpd_cap_addr;
478 if (addr >= EEPROMVSIZE || (addr & 3))
/* Write the data first, then the address with the F flag set to start the
 * write; the flag clears when the EEPROM write completes. */
481 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
483 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
484 (u16)addr | PCI_VPD_ADDR_F);
487 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
488 } while ((val & PCI_VPD_ADDR_F) && --attempts);
490 if (val & PCI_VPD_ADDR_F) {
491 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
498 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
499 * @phys_addr: the physical EEPROM address
500 * @fn: the PCI function number
501 * @sz: size of function-specific area
503 * Translate a physical EEPROM address to virtual. The first 1K is
504 * accessed through virtual addresses starting at 31K, the rest is
505 * accessed through virtual addresses starting at 0.
507 * The mapping is as follows:
508 * [0..1K) -> [31K..32K)
509 * [1K..1K+A) -> [ES-A..ES)
510 * [1K+A..ES) -> [0..ES-A-1K)
512 * where A = @fn * @sz, and ES = EEPROM size.
514 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* NOTE(review): the comparisons below use @fn directly as the byte size A,
 * which per the mapping above should be fn * sz — presumably fn is scaled
 * by sz in a line not visible here; confirm against the full source. */
517 if (phys_addr < 1024)
518 return phys_addr + (31 << 10);
519 if (phys_addr < 1024 + fn)
520 return EEPROMSIZE - fn + phys_addr - 1024;
521 if (phys_addr < EEPROMSIZE)
522 return phys_addr - 1024 - fn;
527 * t4_seeprom_wp - enable/disable EEPROM write protection
528 * @adapter: the adapter
529 * @enable: whether to enable or disable write protection
531 * Enables or disables write protection on the serial EEPROM.
533 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* 0xc sets the EEPROM's block-protect bits in the status word; 0 clears them. */
535 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
539 * get_vpd_keyword_val - Locates an information field keyword in the VPD
540 * @v: Pointer to buffered vpd data structure
541 * @kw: The keyword to search for
543 * Returns the value of the information field keyword or
546 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
549 unsigned int offset , len;
550 const u8 *buf = &v->id_tag;
551 const u8 *vpdr_len = &v->vpdr_tag;
552 offset = sizeof(struct t4_vpd_hdr);
/* VPD-R length is a 16-bit little-endian value following the VPD-R tag byte. */
553 len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
555 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
/* Walk the info fields: 2-byte keyword, 1-byte length, then the value. */
559 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
560 if(memcmp(buf + i , kw , 2) == 0){
561 i += VPD_INFO_FLD_HDR_SIZE;
565 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
573 * get_vpd_params - read VPD parameters from VPD EEPROM
574 * @adapter: adapter to read
575 * @p: where to store the parameters
577 * Reads card parameters stored in VPD EEPROM.
579 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
583 u8 vpd[VPD_LEN], csum;
584 const struct t4_vpd_hdr *v;
587 * Card information normally starts at VPD_BASE but early cards had
/* Probe VPD_BASE: an 0x82 byte is the large-resource ID-string tag that
 * starts a VPD image; otherwise fall back to the old base address. */
590 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
591 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
/* Read the whole VPD image into the local buffer, 4 bytes at a time. */
593 for (i = 0; i < sizeof(vpd); i += 4) {
594 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
598 v = (const struct t4_vpd_hdr *)vpd;
600 #define FIND_VPD_KW(var,name) do { \
601 var = get_vpd_keyword_val(v , name); \
603 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
/* Verify the RV (checksum) field: the bytes through RV must sum to zero. */
608 FIND_VPD_KW(i, "RV");
609 for (csum = 0; i >= 0; i--)
613 CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Locate the standard identity keywords: EC, serial, part number, MAC. */
616 FIND_VPD_KW(ec, "EC");
617 FIND_VPD_KW(sn, "SN");
618 FIND_VPD_KW(pn, "PN");
619 FIND_VPD_KW(na, "NA");
622 memcpy(p->id, v->id_data, ID_LEN);
624 memcpy(p->ec, vpd + ec, EC_LEN);
/* Field length byte lives VPD_INFO_FLD_HDR_SIZE before the value, at +2. */
626 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
627 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
629 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
630 strstrip((char *)p->pn);
631 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
632 strstrip((char *)p->na);
637 /* serial flash and firmware constants and flash config file constants */
639 SF_ATTEMPTS = 10, /* max retries for SF operations */
641 /* flash command opcodes */
642 SF_PROG_PAGE = 2, /* program page */
643 SF_WR_DISABLE = 4, /* disable writes */
644 SF_RD_STATUS = 5, /* read status register */
645 SF_WR_ENABLE = 6, /* enable writes */
646 SF_RD_DATA_FAST = 0xb, /* read flash */
647 SF_RD_ID = 0x9f, /* read ID */
648 SF_ERASE_SECTOR = 0xd8, /* erase sector */
652 * sf1_read - read data from the serial flash
653 * @adapter: the adapter
654 * @byte_cnt: number of bytes to read
655 * @cont: whether another operation will be chained
656 * @lock: whether to lock SF for PL access only
657 * @valp: where to store the read data
659 * Reads up to 4 bytes of data from the serial flash. The location of
660 * the read needs to be specified prior to calling this by issuing the
661 * appropriate commands to the serial flash.
663 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* The SF engine transfers 1-4 bytes per operation; reject anything else. */
668 if (!byte_cnt || byte_cnt > 4)
670 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* BYTECNT is encoded as count-1; wait for BUSY to clear, then read the data. */
672 t4_write_reg(adapter, A_SF_OP,
673 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
674 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
676 *valp = t4_read_reg(adapter, A_SF_DATA);
681 * sf1_write - write data to the serial flash
682 * @adapter: the adapter
683 * @byte_cnt: number of bytes to write
684 * @cont: whether another operation will be chained
685 * @lock: whether to lock SF for PL access only
686 * @val: value to write
688 * Writes up to 4 bytes of data to the serial flash. The location of
689 * the write needs to be specified prior to calling this by issuing the
690 * appropriate commands to the serial flash.
692 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
695 if (!byte_cnt || byte_cnt > 4)
697 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data, then start the op; V_OP(1) selects a write transfer. */
699 t4_write_reg(adapter, A_SF_DATA, val);
700 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
701 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
702 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
706 * flash_wait_op - wait for a flash operation to complete
707 * @adapter: the adapter
708 * @attempts: max number of polls of the status register
709 * @delay: delay between polls in ms
711 * Wait for a flash operation to complete by polling the status register.
713 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue SF_RD_STATUS (chained) then read back one status byte per poll. */
719 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
720 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
732 * t4_read_flash - read words from serial flash
733 * @adapter: the adapter
734 * @addr: the start address for the read
735 * @nwords: how many 32-bit words to read
736 * @data: where to store the read data
737 * @byte_oriented: whether to store data as bytes or as words
739 * Read the specified number of 32-bit words from the serial flash.
740 * If @byte_oriented is set the read data is stored as a byte array
741 * (i.e., big-endian), otherwise as 32-bit words in the platform's
744 int t4_read_flash(struct adapter *adapter, unsigned int addr,
745 unsigned int nwords, u32 *data, int byte_oriented)
749 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Build the 4-byte command: opcode in the low byte, 24-bit address swabbed
 * so it goes out on the wire MSB-first. */
752 addr = swab32(addr) | SF_RD_DATA_FAST;
754 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
755 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Stream out the words; keep CONT set until the final word, which locks SF. */
758 for ( ; nwords; nwords--, data++) {
759 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
761 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
765 *data = htonl(*data);
771 * t4_write_flash - write up to a page of data to the serial flash
772 * @adapter: the adapter
773 * @addr: the start address to write
774 * @n: length of data to write in bytes
775 * @data: the data to write
776 * @byte_oriented: whether to store data as bytes or as words
778 * Writes up to a page of data (256 bytes) to the serial flash starting
779 * at the given address. All the data must be written to the same page.
780 * If @byte_oriented is set the write data is stored as byte stream
781 * (i.e. matches what on disk), otherwise in big-endian.
783 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
784 unsigned int n, const u8 *data, int byte_oriented)
787 u32 buf[SF_PAGE_SIZE / 4];
788 unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must not cross a 256-byte page boundary. */
790 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* WREN must precede the page-program command; address is sent MSB-first. */
793 val = swab32(addr) | SF_PROG_PAGE;
795 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
796 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the data up to 4 bytes per SF op, packed big-endian into val. */
799 for (left = n; left; left -= c) {
801 for (val = 0, i = 0; i < c; ++i)
802 val = (val << 8) + *data++;
807 ret = sf1_write(adapter, c, c != left, 1, val);
811 ret = flash_wait_op(adapter, 8, 1);
815 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
817 /* Read the page to verify the write succeeded */
818 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* @data has advanced past the written bytes, so compare from data - n. */
823 if (memcmp(data - n, (u8 *)buf + offset, n)) {
824 CH_ERR(adapter, "failed to correctly write the flash page "
831 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
836 * t4_get_fw_version - read the firmware version
837 * @adapter: the adapter
838 * @vers: where to place the version
840 * Reads the FW version from flash.
842 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* One 32-bit word at the fw_ver offset inside the flash-resident fw_hdr. */
844 return t4_read_flash(adapter,
845 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
850 * t4_get_tp_version - read the TP microcode version
851 * @adapter: the adapter
852 * @vers: where to place the version
854 * Reads the TP microcode version from flash.
856 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* Like t4_get_fw_version, but for the TP microcode field of the fw_hdr. */
858 return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
864 * t4_check_fw_version - check if the FW is compatible with this driver
865 * @adapter: the adapter
867 * Checks if an adapter's FW is compatible with the driver. Returns 0
868 * if there's exact match, a negative error if the version could not be
869 * read or there's a major version mismatch, and a positive value if the
870 * expected major version is found but there's a minor version mismatch.
872 int t4_check_fw_version(struct adapter *adapter)
874 int ret, major, minor, micro;
/* Cache both versions in adapter->params as a side effect of the check. */
876 ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
878 ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
882 major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
883 minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
884 micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
886 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
887 CH_ERR(adapter, "card FW has major version %u, driver wants "
888 "%u\n", major, FW_VERSION_MAJOR);
892 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
893 return 0; /* perfect match */
895 /* Minor/micro version mismatch. Report it but often it's OK. */
900 * t4_flash_erase_sectors - erase a range of flash sectors
901 * @adapter: the adapter
902 * @start: the first sector to erase
903 * @end: the last sector to erase
905 * Erases the sectors in the given inclusive range.
907 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
911 while (start <= end) {
/* Per sector: WREN, then sector-erase with the sector number in the
 * address field (<< 8), then wait out the (slow) erase. */
912 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
913 (ret = sf1_write(adapter, 4, 0, 1,
914 SF_ERASE_SECTOR | (start << 8))) != 0 ||
915 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
916 CH_ERR(adapter, "erase of flash sector %d failed, "
917 "error %d\n", start, ret);
922 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
927 * t4_flash_cfg_addr - return the address of the flash configuration file
928 * @adapter: the adapter
930 * Return the address within the flash where the Firmware Configuration
/* A 1MB serial flash indicates an FPGA part, which keeps the config file
 * at a different flash location than production adapters. */
933 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
935 if (adapter->params.sf_size == 0x100000)
936 return FLASH_FPGA_CFG_START;
938 return FLASH_CFG_START;
942 * t4_load_cfg - download config file
944 * @cfg_data: the cfg text file to write
945 * @size: text file size
947 * Write the supplied config text file to the card's serial flash.
949 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
953 unsigned int flash_cfg_start_sec;
954 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
956 addr = t4_flash_cfg_addr(adap);
957 flash_cfg_start_sec = addr / SF_SEC_SIZE;
959 if (size > FLASH_CFG_MAX_SIZE) {
960 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
/* Erase the full config region regardless of @size so stale tail data from a
 * previous, larger file cannot survive. */
965 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
967 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
968 flash_cfg_start_sec + i - 1);
970 * If size == 0 then we're simply erasing the FLASH sectors associated
971 * with the on-adapter Firmware Configuration File.
973 if (ret || size == 0)
976 /* this will write to the flash up to SF_PAGE_SIZE at a time */
977 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
978 if ( (size - i) < SF_PAGE_SIZE)
982 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
986 addr += SF_PAGE_SIZE;
987 cfg_data += SF_PAGE_SIZE;
992 CH_ERR(adap, "config file %s failed %d\n",
993 (size == 0 ? "clear" : "download"), ret);
999 * t4_load_fw - download firmware
1000 * @adap: the adapter
1001 * @fw_data: the firmware image to write
1004 * Write the supplied firmware image to the card's serial flash.
1006 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1011 u8 first_page[SF_PAGE_SIZE];
1012 const u32 *p = (const u32 *)fw_data;
1013 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1014 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/* Sanity-check the image: non-empty, 512-byte multiple, header-consistent
 * length, and within the flash region reserved for FW. */
1017 CH_ERR(adap, "FW image has no data\n");
1021 CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1024 if (ntohs(hdr->len512) * 512 != size) {
1025 CH_ERR(adap, "FW image size differs from size in FW header\n");
1028 if (size > FLASH_FW_MAX_SIZE) {
1029 CH_ERR(adap, "FW image too large, max is %u bytes\n",
/* The image is checksummed so that all big-endian words sum to 0xffffffff. */
1034 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1035 csum += ntohl(p[i]);
1037 if (csum != 0xffffffff) {
1038 CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1043 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1044 ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1045 FLASH_FW_START_SEC + i - 1);
1050 * We write the correct version at the end so the driver can see a bad
1051 * version if the FW write fails. Start by writing a copy of the
1052 * first page with a bad version.
1054 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1055 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1056 ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
/* Write the remaining pages, then patch in the real version last. */
1060 addr = FLASH_FW_START;
1061 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1062 addr += SF_PAGE_SIZE;
1063 fw_data += SF_PAGE_SIZE;
1064 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1069 ret = t4_write_flash(adap,
1070 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1071 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1074 CH_ERR(adap, "firmware download failed, error %d\n", ret);
1078 /* BIOS boot headers */
/* Minimal PCI expansion ROM header: only the fields this driver needs
 * (signature plus the offset of the PCIR data structure). All multi-byte
 * fields in these headers are little-endian byte arrays. */
1079 typedef struct pci_expansion_rom_header {
1080 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1081 u8 reserved[22]; /* Reserved per processor Architecture data */
1082 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1083 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1085 /* Legacy PCI Expansion ROM Header */
1086 typedef struct legacy_pci_expansion_rom_header {
1087 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
1088 u8 size512; /* Current Image Size in units of 512 bytes */
1089 u8 initentry_point[4];
1090 u8 cksum; /* Checksum computed on the entire Image */
1091 u8 reserved[16]; /* Reserved */
1092 u8 pcir_offset[2]; /* Offset to PCI Data Struture */
1093 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1095 /* EFI PCI Expansion ROM Header */
1096 typedef struct efi_pci_expansion_rom_header {
1097 u8 signature[2]; // ROM signature. The value 0xaa55
1098 u8 initialization_size[2]; /* Units 512. Includes this header */
1099 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1100 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
1101 u8 efi_machine_type[2]; /* Machine type from EFI image header */
1102 u8 compression_type[2]; /* Compression type. */
1104 * Compression type definition
1107 * 0x2-0xFFFF: Reserved
1109 u8 reserved[8]; /* Reserved */
1110 u8 efi_image_header_offset[2]; /* Offset to EFI Image */
1111 u8 pcir_offset[2]; /* Offset to PCI Data Structure */
1112 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1114 /* PCI Data Structure Format */
1115 typedef struct pcir_data_structure { /* PCI Data Structure */
1116 u8 signature[4]; /* Signature. The string "PCIR" */
1117 u8 vendor_id[2]; /* Vendor Identification */
1118 u8 device_id[2]; /* Device Identification */
1119 u8 vital_product[2]; /* Pointer to Vital Product Data */
1120 u8 length[2]; /* PCIR Data Structure Length */
1121 u8 revision; /* PCIR Data Structure Revision */
1122 u8 class_code[3]; /* Class Code */
1123 u8 image_length[2]; /* Image Length. Multiple of 512B */
1124 u8 code_revision[2]; /* Revision Level of Code/Data */
1125 u8 code_type; /* Code Type. */
1127 * PCI Expansion ROM Code Types
1128 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1129 * 0x01: Open Firmware standard for PCI. FCODE
1130 * 0x02: Hewlett-Packard PA RISC. HP reserved
1131 * 0x03: EFI Image. EFI
1132 * 0x04-0xFF: Reserved.
1134 u8 indicator; /* Indicator. Identifies the last image in the ROM */
1135 u8 reserved[2]; /* Reserved */
1136 } pcir_data_t; /* PCI__DATA_STRUCTURE */
1138 /* BOOT constants */
1140 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1141 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
1142 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
1143 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1144 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
1145 VENDOR_ID = 0x1425, /* Vendor ID */
1146 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1150 * modify_device_id - Modifies the device ID of the Boot BIOS image
1151 * @adatper: the device ID to write.
1152 * @boot_data: the boot image to modify.
1154 * Write the supplied device ID to the boot BIOS image.
1156 static void modify_device_id(int device_id, u8 *boot_data)
1158 legacy_pci_exp_rom_header_t *header;
1159 pcir_data_t *pcir_header;
1163 * Loop through all chained images and change the device ID's
/* pcir_offset is a little-endian u16 within the ROM header; the PCIR
 * structure lives at that offset from the start of the current image. */
1166 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1167 pcir_header = (pcir_data_t *) &boot_data[cur_header +
1168 le16_to_cpu(*(u16*)header->pcir_offset)];
1171 * Only modify the Device ID if code type is Legacy or HP.
1172 * 0x00: Okay to modify
1173 * 0x01: FCODE. Do not be modify
1174 * 0x03: Okay to modify
1175 * 0x04-0xFF: Do not modify
1177 if (pcir_header->code_type == 0x00) {
1182 * Modify Device ID to match current adatper
1184 *(u16*) pcir_header->device_id = device_id;
1187 * Set checksum temporarily to 0.
1188 * We will recalculate it later.
1190 header->cksum = 0x0;
1193 * Calculate and update checksum
/* Legacy images must byte-sum to zero over size512*512 bytes. */
1195 for (i = 0; i < (header->size512 * 512); i++)
1196 csum += (u8)boot_data[cur_header + i];
1199 * Invert summed value to create the checksum
1200 * Writing new checksum value directly to the boot data
/* Offset 7 is the cksum field of the legacy ROM header (sig[2] + size512 +
 * initentry_point[4]). */
1202 boot_data[cur_header + 7] = -csum;
1204 } else if (pcir_header->code_type == 0x03) {
1207 * Modify Device ID to match current adatper
/* EFI images carry no byte checksum here, so no recompute is needed. */
1209 *(u16*) pcir_header->device_id = device_id;
1215 * Check indicator element to identify if this is the last
/* Bit 7 of the PCIR indicator marks the final image in the chain. */
1218 if (pcir_header->indicator & 0x80)
1222 * Move header pointer up to the next image in the ROM.
1224 cur_header += header->size512 * 512;
1229 * t4_load_boot - download boot flash
1230 * @adapter: the adapter
1231 * @boot_data: the boot image to write
1232 * @boot_addr: offset in flash to write boot_data
1235 * Write the supplied boot image to the card's serial flash.
1236 * The boot image has the following sections: a 28-byte header and the
1239 int t4_load_boot(struct adapter *adap, u8 *boot_data,
1240 unsigned int boot_addr, unsigned int size)
1242 pci_exp_rom_header_t *header;
1244 pcir_data_t *pcir_header;
1248 unsigned int boot_sector = boot_addr * 1024;
1249 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1252 * Make sure the boot image does not encroach on the firmware region
1254 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1255 CH_ERR(adap, "boot image encroaching on firmware region\n");
1260 * Number of sectors spanned
1262 i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1264 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1265 (boot_sector >> 16) + i - 1);
1268 * If size == 0 then we're simply erasing the FLASH sectors associated
1269 * with the on-adapter option ROM file
1271 if (ret || (size == 0))
1274 /* Get boot header */
1275 header = (pci_exp_rom_header_t *)boot_data;
1276 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1277 /* PCIR Data Structure */
1278 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1281 * Perform some primitive sanity testing to avoid accidentally
1282 * writing garbage over the boot sectors. We ought to check for
1283 * more but it's not worth it for now ...
1285 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1286 CH_ERR(adap, "boot image too small/large\n");
1291 * Check BOOT ROM header signature
1293 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1294 CH_ERR(adap, "Boot image missing signature\n");
1299 * Check PCI header signature
1301 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1302 CH_ERR(adap, "PCI header missing signature\n");
1307 * Check Vendor ID matches Chelsio ID
1309 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1310 CH_ERR(adap, "Vendor ID missing signature\n");
1315 * Retrieve adapter's device ID
1317 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1318 /* Want to deal with PF 0 so I strip off PF 4 indicator */
1319 device_id = (device_id & 0xff) | 0x4000;
1322 * Check PCIE Device ID
1324 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1326 * Change the device ID in the Boot BIOS image to match
1327 * the Device ID of the current adapter.
1329 modify_device_id(device_id, boot_data);
1333 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1334 * we finish copying the rest of the boot image. This will ensure
1335 * that the BIOS boot header will only be written if the boot image
1336 * was written in full.
1339 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1340 addr += SF_PAGE_SIZE;
1341 boot_data += SF_PAGE_SIZE;
1342 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1347 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1351 CH_ERR(adap, "boot image download failed, error %d\n", ret);
1356 * t4_read_cimq_cfg - read CIM queue configuration
1357 * @adap: the adapter
1358 * @base: holds the queue base addresses in bytes
1359 * @size: holds the queue sizes in bytes
1360 * @thres: holds the queue full thresholds in bytes
1362 * Returns the current configuration of the CIM queues, starting with
1363 * the IBQs, then the OBQs.
1365 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1369 for (i = 0; i < CIM_NUM_IBQ; i++) {
1370 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1372 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1373 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1374 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1375 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
1377 for (i = 0; i < CIM_NUM_OBQ; i++) {
1378 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1380 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1381 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1382 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1387 * t4_read_cim_ibq - read the contents of a CIM inbound queue
1388 * @adap: the adapter
1389 * @qid: the queue index
1390 * @data: where to store the queue contents
1391 * @n: capacity of @data in 32-bit words
1393 * Reads the contents of the selected CIM queue starting at address 0 up
1394 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1395 * error and the number of 32-bit words actually read on success.
1397 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1401 const unsigned int nwords = CIM_IBQ_SIZE * 4;
1403 if (qid > 5 || (n & 3))
1406 addr = qid * nwords;
1410 for (i = 0; i < n; i++, addr++) {
1411 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1413 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1417 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1419 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1424 * t4_read_cim_obq - read the contents of a CIM outbound queue
1425 * @adap: the adapter
1426 * @qid: the queue index
1427 * @data: where to store the queue contents
1428 * @n: capacity of @data in 32-bit words
1430 * Reads the contents of the selected CIM queue starting at address 0 up
1431 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
1432 * error and the number of 32-bit words actually read on success.
1434 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1437 unsigned int addr, v, nwords;
1439 if (qid > 5 || (n & 3))
1442 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1443 V_QUENUMSELECT(qid));
1444 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1446 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
1447 nwords = G_CIMQSIZE(v) * 64; /* same */
1451 for (i = 0; i < n; i++, addr++) {
1452 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1454 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1458 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1460 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
/* CIM internal address space bases */
enum {
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
1473 * t4_cim_read - read a block from CIM internal address space
1474 * @adap: the adapter
1475 * @addr: the start address within the CIM address space
1476 * @n: number of words to read
1477 * @valp: where to store the result
1479 * Reads a block of 4-byte words from the CIM intenal address space.
1481 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1486 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1489 for ( ; !ret && n--; addr += 4) {
1490 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1491 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1494 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1500 * t4_cim_write - write a block into CIM internal address space
1501 * @adap: the adapter
1502 * @addr: the start address within the CIM address space
1503 * @n: number of words to write
1504 * @valp: set of values to write
1506 * Writes a block of 4-byte words into the CIM intenal address space.
1508 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1509 const unsigned int *valp)
1513 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1516 for ( ; !ret && n--; addr += 4) {
1517 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1518 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1519 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
/* Convenience wrapper: write a single word into CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
1531 * t4_cim_ctl_read - read a block from CIM control region
1532 * @adap: the adapter
1533 * @addr: the start address within the CIM control region
1534 * @n: number of words to read
1535 * @valp: where to store the result
1537 * Reads a block of 4-byte words from the CIM control region.
1539 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1542 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1546 * t4_cim_read_la - read CIM LA capture buffer
1547 * @adap: the adapter
1548 * @la_buf: where to store the LA data
1549 * @wrptr: the HW write pointer within the capture buffer
1551 * Reads the contents of the CIM LA buffer with the most recent entry at
1552 * the end of the returned data and with the entry at @wrptr first.
1553 * We try to leave the LA in the running state we find it in.
1555 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1558 unsigned int cfg, val, idx;
1560 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1564 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
1565 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1570 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1574 idx = G_UPDBGLAWRPTR(val);
1578 for (i = 0; i < adap->params.cim_la_size; i++) {
1579 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1580 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1583 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1586 if (val & F_UPDBGLARDEN) {
1590 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1593 idx = (idx + 1) & M_UPDBGLARDPTR;
1596 if (cfg & F_UPDBGLAEN) {
1597 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1598 cfg & ~F_UPDBGLARDEN);
1605 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1606 unsigned int *pif_req_wrptr,
1607 unsigned int *pif_rsp_wrptr)
1610 u32 cfg, val, req, rsp;
1612 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1613 if (cfg & F_LADBGEN)
1614 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1616 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1617 req = G_POLADBGWRPTR(val);
1618 rsp = G_PILADBGWRPTR(val);
1620 *pif_req_wrptr = req;
1622 *pif_rsp_wrptr = rsp;
1624 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1625 for (j = 0; j < 6; j++) {
1626 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1627 V_PILADBGRDPTR(rsp));
1628 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1629 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1633 req = (req + 2) & M_POLADBGRDPTR;
1634 rsp = (rsp + 2) & M_PILADBGRDPTR;
1636 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1639 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1644 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1645 if (cfg & F_LADBGEN)
1646 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1648 for (i = 0; i < CIM_MALA_SIZE; i++) {
1649 for (j = 0; j < 5; j++) {
1651 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1652 V_PILADBGRDPTR(idx));
1653 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1654 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1657 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1661 * t4_tp_read_la - read TP LA capture buffer
1662 * @adap: the adapter
1663 * @la_buf: where to store the LA data
1664 * @wrptr: the HW write pointer within the capture buffer
1666 * Reads the contents of the TP LA buffer with the most recent entry at
1667 * the end of the returned data and with the entry at @wrptr first.
1668 * We leave the LA in the running state we find it in.
1670 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1672 bool last_incomplete;
1673 unsigned int i, cfg, val, idx;
1675 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1676 if (cfg & F_DBGLAENABLE) /* freeze LA */
1677 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1678 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1680 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1681 idx = G_DBGLAWPTR(val);
1682 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1683 if (last_incomplete)
1684 idx = (idx + 1) & M_DBGLARPTR;
1689 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1690 val |= adap->params.tp.la_mask;
1692 for (i = 0; i < TPLA_SIZE; i++) {
1693 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1694 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1695 idx = (idx + 1) & M_DBGLARPTR;
1698 /* Wipe out last entry if it isn't valid */
1699 if (last_incomplete)
1700 la_buf[TPLA_SIZE - 1] = ~0ULL;
1702 if (cfg & F_DBGLAENABLE) /* restore running state */
1703 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1704 cfg | adap->params.tp.la_mask);
1707 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1711 for (i = 0; i < 8; i++) {
1712 u32 *p = la_buf + i;
1714 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1715 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1716 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1717 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1718 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1722 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1723 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1726 * t4_link_start - apply link configuration to MAC/PHY
1727 * @phy: the PHY to setup
1728 * @mac: the MAC to setup
1729 * @lc: the requested link configuration
1731 * Set up a port's MAC and PHY according to a desired link configuration.
1732 * - If the PHY can auto-negotiate first decide what to advertise, then
1733 * enable/disable auto-negotiation as desired, and reset.
1734 * - If the PHY does not auto-negotiate just reset it.
1735 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1736 * otherwise do it later based on the outcome of auto-negotiation.
1738 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1739 struct link_config *lc)
1741 struct fw_port_cmd c;
1742 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1745 if (lc->requested_fc & PAUSE_RX)
1746 fc |= FW_PORT_CAP_FC_RX;
1747 if (lc->requested_fc & PAUSE_TX)
1748 fc |= FW_PORT_CAP_FC_TX;
1750 memset(&c, 0, sizeof(c));
1751 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1752 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1753 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1756 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1757 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1758 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1759 } else if (lc->autoneg == AUTONEG_DISABLE) {
1760 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1761 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1763 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1765 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1769 * t4_restart_aneg - restart autonegotiation
1770 * @adap: the adapter
1771 * @mbox: mbox to use for the FW command
1772 * @port: the port id
1774 * Restarts autonegotiation for the selected port.
1776 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1778 struct fw_port_cmd c;
1780 memset(&c, 0, sizeof(c));
1781 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1782 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1783 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1785 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1786 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* One row of a table-driven interrupt handler (see t4_handle_intr_status). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1797 * t4_handle_intr_status - table driven interrupt handler
1798 * @adapter: the adapter that generated the interrupt
1799 * @reg: the interrupt status register to process
1800 * @acts: table of interrupt actions
1802 * A table driven interrupt handler that applies a set of masks to an
1803 * interrupt status word and performs the corresponding actions if the
1804 * interrupts described by the mask have occured. The actions include
1805 * optionally emitting a warning or alert message. The table is terminated
1806 * by an entry specifying mask 0. Returns the number of fatal interrupt
1809 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1810 const struct intr_info *acts)
1813 unsigned int mask = 0;
1814 unsigned int status = t4_read_reg(adapter, reg);
1816 for ( ; acts->mask; ++acts) {
1817 if (!(status & acts->mask))
1821 CH_ALERT(adapter, "%s (0x%x)\n",
1822 acts->msg, status & acts->mask);
1823 } else if (acts->msg)
1824 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1825 acts->msg, status & acts->mask);
1829 if (status) /* clear processed interrupts */
1830 t4_write_reg(adapter, reg, status);
1835 * Interrupt handler for the PCIE module.
1837 static void pcie_intr_handler(struct adapter *adapter)
1839 static struct intr_info sysbus_intr_info[] = {
1840 { F_RNPP, "RXNP array parity error", -1, 1 },
1841 { F_RPCP, "RXPC array parity error", -1, 1 },
1842 { F_RCIP, "RXCIF array parity error", -1, 1 },
1843 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1844 { F_RFTP, "RXFT array parity error", -1, 1 },
1847 static struct intr_info pcie_port_intr_info[] = {
1848 { F_TPCP, "TXPC array parity error", -1, 1 },
1849 { F_TNPP, "TXNP array parity error", -1, 1 },
1850 { F_TFTP, "TXFT array parity error", -1, 1 },
1851 { F_TCAP, "TXCA array parity error", -1, 1 },
1852 { F_TCIP, "TXCIF array parity error", -1, 1 },
1853 { F_RCAP, "RXCA array parity error", -1, 1 },
1854 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1855 { F_RDPE, "Rx data parity error", -1, 1 },
1856 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
1859 static struct intr_info pcie_intr_info[] = {
1860 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1861 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1862 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1863 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1864 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1865 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1866 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1867 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1868 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1869 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1870 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1871 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1872 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1873 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1874 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1875 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1876 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1877 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1878 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1879 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1880 { F_FIDPERR, "PCI FID parity error", -1, 1 },
1881 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1882 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1883 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1884 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1885 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1886 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1887 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
1888 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
1889 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1896 fat = t4_handle_intr_status(adapter,
1897 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1899 t4_handle_intr_status(adapter,
1900 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1901 pcie_port_intr_info) +
1902 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1904 t4_fatal_err(adapter);
1908 * TP interrupt handler.
1910 static void tp_intr_handler(struct adapter *adapter)
1912 static struct intr_info tp_intr_info[] = {
1913 { 0x3fffffff, "TP parity error", -1, 1 },
1914 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1918 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1919 t4_fatal_err(adapter);
1923 * SGE interrupt handler.
1925 static void sge_intr_handler(struct adapter *adapter)
1930 static struct intr_info sge_intr_info[] = {
1931 { F_ERR_CPL_EXCEED_IQE_SIZE,
1932 "SGE received CPL exceeding IQE size", -1, 1 },
1933 { F_ERR_INVALID_CIDX_INC,
1934 "SGE GTS CIDX increment too large", -1, 0 },
1935 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1936 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1937 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1938 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1939 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1941 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1943 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1945 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1947 { F_ERR_ING_CTXT_PRIO,
1948 "SGE too many priority ingress contexts", -1, 0 },
1949 { F_ERR_EGR_CTXT_PRIO,
1950 "SGE too many priority egress contexts", -1, 0 },
1951 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1952 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1956 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1957 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1959 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
1960 (unsigned long long)v);
1961 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1962 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1965 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1967 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1968 if (err & F_ERROR_QID_VALID) {
1969 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1970 if (err & F_UNCAPTURED_ERROR)
1971 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
1972 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1973 F_UNCAPTURED_ERROR);
1977 t4_fatal_err(adapter);
/* Aggregate masks for all CIM OBQ / IBQ parity-error interrupt bits. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1986 * CIM interrupt handler.
1988 static void cim_intr_handler(struct adapter *adapter)
1990 static struct intr_info cim_intr_info[] = {
1991 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1992 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1993 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1994 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1995 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1996 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1997 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2000 static struct intr_info cim_upintr_info[] = {
2001 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2002 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2003 { F_ILLWRINT, "CIM illegal write", -1, 1 },
2004 { F_ILLRDINT, "CIM illegal read", -1, 1 },
2005 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2006 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2007 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2008 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2009 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2010 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2011 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2012 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2013 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2014 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2015 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2016 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2017 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2018 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2019 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2020 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2021 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2022 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2023 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2024 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2025 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2026 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2027 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2028 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2034 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2036 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2039 t4_fatal_err(adapter);
2043 * ULP RX interrupt handler.
2045 static void ulprx_intr_handler(struct adapter *adapter)
2047 static struct intr_info ulprx_intr_info[] = {
2048 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2049 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2050 { 0x7fffff, "ULPRX parity error", -1, 1 },
2054 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2055 t4_fatal_err(adapter);
2059 * ULP TX interrupt handler.
2061 static void ulptx_intr_handler(struct adapter *adapter)
2063 static struct intr_info ulptx_intr_info[] = {
2064 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2066 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2068 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2070 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2072 { 0xfffffff, "ULPTX parity error", -1, 1 },
2076 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2077 t4_fatal_err(adapter);
2081 * PM TX interrupt handler.
2083 static void pmtx_intr_handler(struct adapter *adapter)
2085 static struct intr_info pmtx_intr_info[] = {
2086 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2087 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2088 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2089 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2090 { 0xffffff0, "PMTX framing error", -1, 1 },
2091 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2092 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2094 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2095 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2099 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2100 t4_fatal_err(adapter);
2104 * PM RX interrupt handler.
2106 static void pmrx_intr_handler(struct adapter *adapter)
2108 static struct intr_info pmrx_intr_info[] = {
2109 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2110 { 0x3ffff0, "PMRX framing error", -1, 1 },
2111 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2112 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2114 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2115 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2119 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2120 t4_fatal_err(adapter);
2124 * CPL switch interrupt handler.
2126 static void cplsw_intr_handler(struct adapter *adapter)
2128 static struct intr_info cplsw_intr_info[] = {
2129 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2130 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2131 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2132 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2133 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2134 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2138 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2139 t4_fatal_err(adapter);
2143 * LE interrupt handler.
2145 static void le_intr_handler(struct adapter *adap)
2147 static struct intr_info le_intr_info[] = {
2148 { F_LIPMISS, "LE LIP miss", -1, 0 },
2149 { F_LIP0, "LE 0 LIP error", -1, 0 },
2150 { F_PARITYERR, "LE parity error", -1, 1 },
2151 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2152 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
2156 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2161 * MPS interrupt handler.
2163 static void mps_intr_handler(struct adapter *adapter)
2165 static struct intr_info mps_rx_intr_info[] = {
2166 { 0xffffff, "MPS Rx parity error", -1, 1 },
2169 static struct intr_info mps_tx_intr_info[] = {
2170 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2171 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2172 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2174 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2176 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
2177 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2178 { F_FRMERR, "MPS Tx framing error", -1, 1 },
2181 static struct intr_info mps_trc_intr_info[] = {
2182 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2183 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2185 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2188 static struct intr_info mps_stat_sram_intr_info[] = {
2189 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2192 static struct intr_info mps_stat_tx_intr_info[] = {
2193 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2196 static struct intr_info mps_stat_rx_intr_info[] = {
2197 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2200 static struct intr_info mps_cls_intr_info[] = {
2201 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2202 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2203 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2209 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2211 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2213 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2214 mps_trc_intr_info) +
2215 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2216 mps_stat_sram_intr_info) +
2217 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2218 mps_stat_tx_intr_info) +
2219 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2220 mps_stat_rx_intr_info) +
2221 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2224 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2225 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2227 t4_fatal_err(adapter);
/*
 * NOTE(review): lines elided in this listing (braces, an else keyword around
 * original lines 2244-2248, and the CH_ALERT continuation near 2262) — confirm
 * against the full source before editing. Visible behavior: selects the EDC
 * or MC cause/ECC-status registers by idx, reports parity and correctable/
 * uncorrectable ECC errors, resets the CE counter, acks the cause bits, and
 * calls t4_fatal_err() for parity or uncorrectable ECC errors.
 */
2230 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2233 * EDC/MC interrupt handler.
2235 static void mem_intr_handler(struct adapter *adapter, int idx)
2237 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2239 unsigned int addr, cnt_addr, v;
2241 if (idx <= MEM_EDC1) {
2242 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2243 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2245 addr = A_MC_INT_CAUSE;
2246 cnt_addr = A_MC_ECC_STATUS;
2249 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2250 if (v & F_PERR_INT_CAUSE)
2251 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2252 if (v & F_ECC_CE_INT_CAUSE) {
2253 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2255 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2256 CH_WARN_RATELIMIT(adapter,
2257 "%u %s correctable ECC data error%s\n",
2258 cnt, name[idx], cnt > 1 ? "s" : "");
2260 if (v & F_ECC_UE_INT_CAUSE)
2261 CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2264 t4_write_reg(adapter, addr, v);
2265 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2266 t4_fatal_err(adapter);
/*
 * NOTE(review): per-module interrupt handlers for MA, SMB, NC-SI, XGMAC and
 * PL. This listing has elided lines (braces, array terminators, the
 * t4_fatal_err() calls that presumably follow the t4_handle_intr_status()
 * checks in the SMB/NC-SI/PL handlers — TODO confirm against full source).
 * Each handler reads its module's INT_CAUSE register, logs the decoded error
 * bits, and writes the cause back to acknowledge.
 */
2270 * MA interrupt handler.
2272 static void ma_intr_handler(struct adapter *adapter)
2274 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2276 if (status & F_MEM_PERR_INT_CAUSE)
2277 CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2278 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2279 if (status & F_MEM_WRAP_INT_CAUSE) {
2280 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2281 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2282 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2283 G_MEM_WRAP_ADDRESS(v) << 4);
2285 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2286 t4_fatal_err(adapter);
2290 * SMB interrupt handler.
2292 static void smb_intr_handler(struct adapter *adap)
2294 static struct intr_info smb_intr_info[] = {
2295 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2296 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2297 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2301 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2306 * NC-SI interrupt handler.
2308 static void ncsi_intr_handler(struct adapter *adap)
2310 static struct intr_info ncsi_intr_info[] = {
2311 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2312 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2313 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2314 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2318 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2323 * XGMAC interrupt handler.
2325 static void xgmac_intr_handler(struct adapter *adap, int port)
2327 u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2329 v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2333 if (v & F_TXFIFO_PRTY_ERR)
2334 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2335 if (v & F_RXFIFO_PRTY_ERR)
2336 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2337 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2342 * PL interrupt handler.
2344 static void pl_intr_handler(struct adapter *adap)
2346 static struct intr_info pl_intr_info[] = {
2347 { F_FATALPERR, "T4 fatal parity error", -1, 1 },
2348 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2352 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
/*
 * NOTE(review): lines elided in this listing (several of the "if (cause &
 * F_xxx)" guard lines preceding the handler calls, early return, and braces).
 * Visible behavior: reads A_PL_INT_CAUSE once, dispatches each set cause bit
 * to the matching per-module handler, then acks only the bits in
 * GLBL_INTR_MASK and flushes with a readback.
 */
2356 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2357 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2358 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2359 F_CPL_SWITCH | F_SGE | F_ULP_TX)
2362 * t4_slow_intr_handler - control path interrupt handler
2363 * @adapter: the adapter
2365 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2366 * The designation 'slow' is because it involves register reads, while
2367 * data interrupts typically don't involve any MMIOs.
2369 int t4_slow_intr_handler(struct adapter *adapter)
2371 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2373 if (!(cause & GLBL_INTR_MASK))
2376 cim_intr_handler(adapter);
2378 mps_intr_handler(adapter);
2380 ncsi_intr_handler(adapter);
2382 pl_intr_handler(adapter);
2384 smb_intr_handler(adapter);
2385 if (cause & F_XGMAC0)
2386 xgmac_intr_handler(adapter, 0);
2387 if (cause & F_XGMAC1)
2388 xgmac_intr_handler(adapter, 1);
2389 if (cause & F_XGMAC_KR0)
2390 xgmac_intr_handler(adapter, 2);
2391 if (cause & F_XGMAC_KR1)
2392 xgmac_intr_handler(adapter, 3);
2394 pcie_intr_handler(adapter);
2396 mem_intr_handler(adapter, MEM_MC);
2398 mem_intr_handler(adapter, MEM_EDC0);
2400 mem_intr_handler(adapter, MEM_EDC1);
2402 le_intr_handler(adapter);
2404 tp_intr_handler(adapter);
2406 ma_intr_handler(adapter);
2407 if (cause & F_PM_TX)
2408 pmtx_intr_handler(adapter);
2409 if (cause & F_PM_RX)
2410 pmrx_intr_handler(adapter);
2411 if (cause & F_ULP_RX)
2412 ulprx_intr_handler(adapter);
2413 if (cause & F_CPL_SWITCH)
2414 cplsw_intr_handler(adapter);
2416 sge_intr_handler(adapter);
2417 if (cause & F_ULP_TX)
2418 ulptx_intr_handler(adapter);
2420 /* Clear the interrupts just processed for which we are the master. */
2421 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2422 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
/*
 * NOTE(review): braces and a few lines elided in this listing (e.g. the tail
 * of the A_SGE_INT_ENABLE3 bit list after original line 2449). Visible
 * behavior: enable writes the SGE error-interrupt enables, the per-PF
 * PF_INTR_MASK, and sets this PF's bit in A_PL_INT_MAP0; disable clears the
 * per-PF enable and this PF's map bit.
 */
2427 * t4_intr_enable - enable interrupts
2428 * @adapter: the adapter whose interrupts should be enabled
2430 * Enable PF-specific interrupts for the calling function and the top-level
2431 * interrupt concentrator for global interrupts. Interrupts are already
2432 * enabled at each module, here we just enable the roots of the interrupt
2435 * Note: this function should be called only when the driver manages
2436 * non PF-specific interrupts from the various HW modules. Only one PCI
2437 * function at a time should be doing this.
2439 void t4_intr_enable(struct adapter *adapter)
2441 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2443 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2444 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2445 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2446 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2447 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2448 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2449 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2451 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2452 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2456 * t4_intr_disable - disable interrupts
2457 * @adapter: the adapter whose interrupts should be disabled
2459 * Disable interrupts. We only disable the top-level interrupt
2460 * concentrators. The caller must be a PCI function managing global
2463 void t4_intr_disable(struct adapter *adapter)
2465 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2467 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2468 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
/*
 * NOTE(review): several cause_reg[] entries are elided from this listing
 * (the numbering jumps at 2485, 2490, 2494, 2496-2501). Visible behavior:
 * writes all-ones to every listed cause register to clear latched interrupts,
 * then acks GLBL_INTR_MASK in A_PL_INT_CAUSE and flushes with a readback.
 */
2472 * t4_intr_clear - clear all interrupts
2473 * @adapter: the adapter whose interrupts should be cleared
2475 * Clears all interrupts. The caller must be a PCI function managing
2476 * global interrupts.
2478 void t4_intr_clear(struct adapter *adapter)
2480 static const unsigned int cause_reg[] = {
2481 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2482 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2483 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2484 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2486 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2487 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2488 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2489 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2491 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2492 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2493 A_MPS_RX_PERR_INT_CAUSE,
2495 MYPF_REG(A_PL_PF_INT_CAUSE),
2502 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2503 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2505 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2506 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
/*
 * NOTE(review): the folding/return lines of this function (after original
 * line 2519) are elided from this listing; only the packing of the six MAC
 * bytes into two 24-bit words is visible here.
 */
2510 * hash_mac_addr - return the hash value of a MAC address
2511 * @addr: the 48-bit Ethernet MAC address
2513 * Hashes a MAC address according to the hash function used by HW inexact
2514 * (hash) address matching.
2516 static int hash_mac_addr(const u8 *addr)
2518 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2519 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
/*
 * NOTE(review): this function is heavily elided in this listing — the outer
 * "while (n > 0)" loop header, the inner queue-packing statements (original
 * lines 2601-2606), wrap-around handling, and the loop bookkeeping are all
 * missing. Visible behavior: builds FW_RSS_IND_TBL_CMD mailbox commands that
 * carry up to 32 ingress-queue IDs, three 10-bit IDs per 32-bit word, and
 * sends each command via t4_wr_mbox(), bailing out on error.
 */
2527 * t4_config_rss_range - configure a portion of the RSS mapping table
2528 * @adapter: the adapter
2529 * @mbox: mbox to use for the FW command
2530 * @viid: virtual interface whose RSS subtable is to be written
2531 * @start: start entry in the table to write
2532 * @n: how many table entries to write
2533 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2534 * @nrspq: number of values in @rspq
2536 * Programs the selected part of the VI's RSS mapping table with the
2537 * provided values. If @nrspq < @n the supplied values are used repeatedly
2538 * until the full table range is populated.
2540 * The caller must ensure the values in @rspq are in the range allowed for
2543 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2544 int start, int n, const u16 *rspq, unsigned int nrspq)
2547 const u16 *rsp = rspq;
2548 const u16 *rsp_end = rspq + nrspq;
2549 struct fw_rss_ind_tbl_cmd cmd;
2551 memset(&cmd, 0, sizeof(cmd));
2552 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2553 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2554 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2555 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2559 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2560 * Queue Identifiers. These Ingress Queue IDs are packed three to
2561 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2565 int nq = min(n, 32);
2567 __be32 *qp = &cmd.iq0_to_iq2;
2570 * Set up the firmware RSS command header to send the next
2571 * "nq" Ingress Queue IDs to the firmware.
2573 cmd.niqid = htons(nq);
2574 cmd.startidx = htons(start);
2577 * "nq" more done for the start of the next loop.
2583 * While there are still Ingress Queue IDs to stuff into the
2584 * current firmware RSS command, retrieve them from the
2585 * Ingress Queue ID array and insert them into the command.
2589 * Grab up to the next 3 Ingress Queue IDs (wrapping
2590 * around the Ingress Queue ID array if necessary) and
2591 * insert them into the firmware RSS command at the
2592 * current 3-tuple position within the command.
2596 int nqbuf = min(3, nq);
2599 qbuf[0] = qbuf[1] = qbuf[2] = 0;
2600 while (nqbuf && nq_packed < 32) {
2607 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2608 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2609 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2613 * Send this portion of the RSS table update to the firmware;
2614 * bail out on any errors.
2616 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
/*
 * NOTE(review): braces and a presumable "else return -EINVAL" branch
 * (original lines 2648-2649) are elided from this listing — confirm against
 * the full source. Visible behavior: both functions build an FW RSS config
 * mailbox command and submit it with t4_wr_mbox().
 */
2625 * t4_config_glbl_rss - configure the global RSS mode
2626 * @adapter: the adapter
2627 * @mbox: mbox to use for the FW command
2628 * @mode: global RSS mode
2629 * @flags: mode-specific flags
2631 * Sets the global RSS mode.
2633 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2636 struct fw_rss_glb_config_cmd c;
2638 memset(&c, 0, sizeof(c));
2639 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2640 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2641 c.retval_len16 = htonl(FW_LEN16(c));
2642 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2643 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2644 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2645 c.u.basicvirtual.mode_pkd =
2646 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2647 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2650 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2654 * t4_config_vi_rss - configure per VI RSS settings
2655 * @adapter: the adapter
2656 * @mbox: mbox to use for the FW command
2659 * @defq: id of the default RSS queue for the VI.
2661 * Configures VI-specific RSS properties.
2663 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2664 unsigned int flags, unsigned int defq)
2666 struct fw_rss_vi_config_cmd c;
2668 memset(&c, 0, sizeof(c));
2669 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2670 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2671 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2672 c.retval_len16 = htonl(FW_LEN16(c));
2673 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2674 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2675 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
/*
 * NOTE(review): braces, the continuation of the t4_wait_op_done_val() call
 * (original line 2683), local declarations, and the error-check lines of
 * t4_read_rss are elided from this listing. Visible behavior: rd_rss_row
 * triggers a lookup-table read and polls for F_LKPTBLROWVLD; t4_read_rss
 * walks half of RSS_NENTRIES rows, extracting two queue values per row.
 */
2678 /* Read an RSS table row */
2679 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2681 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2682 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2687 * t4_read_rss - read the contents of the RSS mapping table
2688 * @adapter: the adapter
2689 * @map: holds the contents of the RSS mapping table
2691 * Reads the contents of the RSS hash->queue mapping table.
2693 int t4_read_rss(struct adapter *adapter, u16 *map)
2698 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2699 ret = rd_rss_row(adapter, i, &val);
2702 *map++ = G_LKPTBLQUEUE0(val);
2703 *map++ = G_LKPTBLQUEUE1(val);
/*
 * NOTE(review): only brace/blank lines appear to be elided in these four
 * accessors. They read/write the 320-bit RSS secret key and the per-PF RSS
 * configuration entries through the TP_PIO indirect register pair.
 */
2709 * t4_read_rss_key - read the global RSS key
2710 * @adap: the adapter
2711 * @key: 10-entry array holding the 320-bit RSS key
2713 * Reads the global 320-bit RSS key.
2715 void t4_read_rss_key(struct adapter *adap, u32 *key)
2717 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2718 A_TP_RSS_SECRET_KEY0);
2722 * t4_write_rss_key - program one of the RSS keys
2723 * @adap: the adapter
2724 * @key: 10-entry array holding the 320-bit RSS key
2725 * @idx: which RSS key to write
2727 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2728 * 0..15 the corresponding entry in the RSS key table is written,
2729 * otherwise the global RSS key is written.
2731 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2733 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2734 A_TP_RSS_SECRET_KEY0);
2735 if (idx >= 0 && idx < 16)
2736 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2737 V_KEYWRADDR(idx) | F_KEYWREN);
2741 * t4_read_rss_pf_config - read PF RSS Configuration Table
2742 * @adapter: the adapter
2743 * @index: the entry in the PF RSS table to read
2744 * @valp: where to store the returned value
2746 * Reads the PF RSS Configuration Table at the specified index and returns
2747 * the value found there.
2749 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2751 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2752 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2756 * t4_write_rss_pf_config - write PF RSS Configuration Table
2757 * @adapter: the adapter
2758 * @index: the entry in the VF RSS table to read
2759 * @val: the value to store
2761 * Writes the PF RSS Configuration Table at the specified index with the
2764 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2766 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2767 &val, 1, A_TP_RSS_PF0_CONFIG + index);
/*
 * NOTE(review): braces, blank lines and some declarations elided in this
 * listing. Both functions drive the VF RSS table through A_TP_RSS_CONFIG_VRT:
 * the reader latches the index'th entry into VFL/VFH then fetches them via
 * TP_PIO; the writer stages VFL/VFH via TP_PIO then commits with F_VFWREN.
 */
2771 * t4_read_rss_vf_config - read VF RSS Configuration Table
2772 * @adapter: the adapter
2773 * @index: the entry in the VF RSS table to read
2774 * @vfl: where to store the returned VFL
2775 * @vfh: where to store the returned VFH
2777 * Reads the VF RSS Configuration Table at the specified index and returns
2778 * the (VFL, VFH) values found there.
2780 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2786 * Request that the index'th VF Table values be read into VFL/VFH.
2788 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2789 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2790 vrt |= V_VFWRADDR(index) | F_VFRDEN;
2791 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2794 * Grab the VFL/VFH values ...
2796 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2797 vfl, 1, A_TP_RSS_VFL_CONFIG);
2798 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2799 vfh, 1, A_TP_RSS_VFH_CONFIG);
2803 * t4_write_rss_vf_config - write VF RSS Configuration Table
2805 * @adapter: the adapter
2806 * @index: the entry in the VF RSS table to write
2807 * @vfl: the VFL to store
2808 * @vfh: the VFH to store
2810 * Writes the VF RSS Configuration Table at the specified index with the
2811 * specified (VFL, VFH) values.
2813 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2819 * Load up VFL/VFH with the values to be written ...
2821 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2822 &vfl, 1, A_TP_RSS_VFL_CONFIG);
2823 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2824 &vfh, 1, A_TP_RSS_VFH_CONFIG);
2827 * Write the VFL/VFH into the VF Table at index'th location.
2829 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2830 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2831 vrt |= V_VFWRADDR(index) | F_VFWREN;
2832 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
/*
 * NOTE(review): braces, local declarations and return statements elided in
 * this listing. Four thin accessors for the PF RSS Map and Mask registers,
 * all via the TP_PIO indirect register pair.
 */
2836 * t4_read_rss_pf_map - read PF RSS Map
2837 * @adapter: the adapter
2839 * Reads the PF RSS Map register and returns its value.
2841 u32 t4_read_rss_pf_map(struct adapter *adapter)
2845 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2846 &pfmap, 1, A_TP_RSS_PF_MAP);
2851 * t4_write_rss_pf_map - write PF RSS Map
2852 * @adapter: the adapter
2853 * @pfmap: PF RSS Map value
2855 * Writes the specified value to the PF RSS Map register.
2857 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2859 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2860 &pfmap, 1, A_TP_RSS_PF_MAP);
2864 * t4_read_rss_pf_mask - read PF RSS Mask
2865 * @adapter: the adapter
2867 * Reads the PF RSS Mask register and returns its value.
2869 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2873 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2874 &pfmask, 1, A_TP_RSS_PF_MSK);
2879 * t4_write_rss_pf_mask - write PF RSS Mask
2880 * @adapter: the adapter
2881 * @pfmask: PF RSS Mask value
2883 * Writes the specified value to the PF RSS Mask register.
2885 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2887 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2888 &pfmask, 1, A_TP_RSS_PF_MSK);
/*
 * NOTE(review): the width-accumulation statement inside the loop (original
 * line 2909), the -EINVAL return, and braces are elided from this listing.
 * Visible behavior: sums the bit widths of the selected optional filter
 * components, rejects modes exceeding FILTER_OPT_LEN, then writes the mode
 * map through TP_PIO.
 */
2892 * t4_set_filter_mode - configure the optional components of filter tuples
2893 * @adap: the adapter
2894 * @mode_map: a bitmap selecting which optional filter components to enable
2896 * Sets the filter mode by selecting the optional components to enable
2897 * in filter tuples. Returns 0 on success and a negative error if the
2898 * requested mode needs more bits than are available for optional
2901 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2903 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2907 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2908 if (mode_map & (1 << i))
2910 if (nbits > FILTER_OPT_LEN)
2912 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
/*
 * NOTE(review): the "if (v4)" / "if (v6)" guards implied by the kernel-doc
 * ("either may be NULL") and the surrounding braces are elided from this
 * listing — confirm against the full source. Reads the TCP MIB counter block
 * via TP_MIB indirect registers and decodes it with the STAT/STAT64 macros.
 */
2918 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2919 * @adap: the adapter
2920 * @v4: holds the TCP/IP counter values
2921 * @v6: holds the TCP/IPv6 counter values
2923 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2924 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2926 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2927 struct tp_tcp_stats *v6)
2929 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2931 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2932 #define STAT(x) val[STAT_IDX(x)]
2933 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2936 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2937 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2938 v4->tcpOutRsts = STAT(OUT_RST);
2939 v4->tcpInSegs = STAT64(IN_SEG);
2940 v4->tcpOutSegs = STAT64(OUT_SEG);
2941 v4->tcpRetransSegs = STAT64(RXT_SEG);
2944 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2945 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2946 v6->tcpOutRsts = STAT(OUT_RST);
2947 v6->tcpInSegs = STAT64(IN_SEG);
2948 v6->tcpOutSegs = STAT64(OUT_SEG);
2949 v6->tcpRetransSegs = STAT64(RXT_SEG);
/*
 * NOTE(review): braces elided in this listing. Fills the tp_err_stats
 * structure by a series of TP_MIB indirect block reads, one per counter
 * group (MAC-in errors, tunnel congestion/Tx drops, offload VLAN drops,
 * TCPv6-in errors, offload ARP drops).
 */
2957 * t4_tp_get_err_stats - read TP's error MIB counters
2958 * @adap: the adapter
2959 * @st: holds the counter values
2961 * Returns the values of TP's error counters.
2963 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2965 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2966 12, A_TP_MIB_MAC_IN_ERR_0);
2967 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2968 8, A_TP_MIB_TNL_CNG_DROP_0);
2969 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2970 4, A_TP_MIB_TNL_DROP_0);
2971 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2972 4, A_TP_MIB_OFD_VLN_DROP_0);
2973 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2974 4, A_TP_MIB_TCP_V6IN_ERR_0);
2975 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2976 2, A_TP_MIB_OFD_ARP_DROP);
/*
 * NOTE(review): braces elided in this listing. Three thin wrappers that
 * fetch the proxy, CPL, and RDMA MIB counter groups with a single TP_MIB
 * indirect block read each.
 */
2980 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
2981 * @adap: the adapter
2982 * @st: holds the counter values
2984 * Returns the values of TP's proxy counters.
2986 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2988 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2989 4, A_TP_MIB_TNL_LPBK_0);
2993 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
2994 * @adap: the adapter
2995 * @st: holds the counter values
2997 * Returns the values of TP's CPL counters.
2999 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3001 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3002 8, A_TP_MIB_CPL_IN_REQ_0);
3006 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3007 * @adap: the adapter
3008 * @st: holds the counter values
3010 * Returns the values of TP's RDMA counters.
3012 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3014 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3015 2, A_TP_MIB_RQE_DFR_MOD);
/*
 * NOTE(review): braces, the val[] declarations, and (in t4_get_usm_stats)
 * the drops-field assignment around original line 3054 are elided from this
 * listing. Both functions gather counters via TP_MIB indirect reads and
 * assemble 64-bit octet counts from HI/LO word pairs.
 */
3019 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3020 * @adap: the adapter
3021 * @idx: the port index
3022 * @st: holds the counter values
3024 * Returns the values of TP's FCoE counters for the selected port.
3026 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3027 struct tp_fcoe_stats *st)
3031 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3032 1, A_TP_MIB_FCOE_DDP_0 + idx);
3033 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3034 1, A_TP_MIB_FCOE_DROP_0 + idx);
3035 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3036 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3037 st->octetsDDP = ((u64)val[0] << 32) | val[1];
3041 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3042 * @adap: the adapter
3043 * @st: holds the counter values
3045 * Returns the values of TP's counters for non-TCP directly-placed packets.
3047 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3051 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3053 st->frames = val[0];
3055 st->octets = ((u64)val[2] << 32) | val[3];
/*
 * NOTE(review): braces, local declarations and the "if (mtu_log)" guard
 * implied by the kernel-doc ("may be NULL") are elided from this listing.
 * Three table readers: each writes an index with the read-trigger pattern
 * (0xff/0xffff) into the corresponding TP table register, then reads back
 * the value and decodes it.
 */
3059 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3060 * @adap: the adapter
3061 * @mtus: where to store the MTU values
3062 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3064 * Reads the HW path MTU table.
3066 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3071 for (i = 0; i < NMTUS; ++i) {
3072 t4_write_reg(adap, A_TP_MTU_TABLE,
3073 V_MTUINDEX(0xff) | V_MTUVALUE(i));
3074 v = t4_read_reg(adap, A_TP_MTU_TABLE);
3075 mtus[i] = G_MTUVALUE(v);
3077 mtu_log[i] = G_MTUWIDTH(v);
3082 * t4_read_cong_tbl - reads the congestion control table
3083 * @adap: the adapter
3084 * @incr: where to store the alpha values
3086 * Reads the additive increments programmed into the HW congestion
3089 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3091 unsigned int mtu, w;
3093 for (mtu = 0; mtu < NMTUS; ++mtu)
3094 for (w = 0; w < NCCTRL_WIN; ++w) {
3095 t4_write_reg(adap, A_TP_CCTRL_TABLE,
3096 V_ROWINDEX(0xffff) | (mtu << 5) | w);
3097 incr[mtu][w] = (u16)t4_read_reg(adap,
3098 A_TP_CCTRL_TABLE) & 0x1fff;
3103 * t4_read_pace_tbl - read the pace table
3104 * @adap: the adapter
3105 * @pace_vals: holds the returned values
3107 * Returns the values of TP's pace table in microseconds.
3109 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3113 for (i = 0; i < NTX_SCHED; i++) {
3114 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3115 v = t4_read_reg(adap, A_TP_PACE_TABLE);
3116 pace_vals[i] = dack_ticks_to_usec(adap, v);
/*
 * NOTE(review): only braces appear elided here. Read-modify-write of an
 * indirect TP register: address latched via A_TP_PIO_ADDR, bits outside
 * @mask preserved from the current A_TP_PIO_DATA value.
 */
3121 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3122 * @adap: the adapter
3123 * @addr: the indirect TP register address
3124 * @mask: specifies the field within the register to modify
3125 * @val: new value for the field
3127 * Sets a field of an indirect TP register to the given value.
3129 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3130 unsigned int mask, unsigned int val)
3132 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3133 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3134 t4_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * NOTE(review): most of the a[]/b[] table initializers (original lines
 * 3147-3170 and 3172-3180) are elided from this listing — do not rewrite
 * without the full source. Fills the default alpha (a) and beta (b) tables
 * used by the HW congestion control.
 */
3138 * init_cong_ctrl - initialize congestion control parameters
3139 * @a: the alpha values for congestion control
3140 * @b: the beta values for congestion control
3142 * Initialize the congestion control parameters.
3144 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3146 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3171 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3174 b[13] = b[14] = b[15] = b[16] = 3;
3175 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3176 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3181 /* The minimum additive increment value for the congestion control table */
3182 #define CC_MIN_INCR 2U
/*
 * NOTE(review): the log2 rounding adjustment (original line 3212), the inner
 * loop's increment declaration, and braces are elided from this listing.
 * Visible behavior: programs each MTU (with its width) into A_TP_MTU_TABLE,
 * then for every congestion-control window derives the additive increment
 * from the MTU, alpha, and expected packets-per-window, clamped below by
 * CC_MIN_INCR, and writes it with beta into A_TP_CCTRL_TABLE.
 */
3185 * t4_load_mtus - write the MTU and congestion control HW tables
3186 * @adap: the adapter
3187 * @mtus: the values for the MTU table
3188 * @alpha: the values for the congestion control alpha parameter
3189 * @beta: the values for the congestion control beta parameter
3191 * Write the HW MTU table with the supplied MTUs and the high-speed
3192 * congestion control table with the supplied alpha, beta, and MTUs.
3193 * We write the two tables together because the additive increments
3194 * depend on the MTUs.
3196 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3197 const unsigned short *alpha, const unsigned short *beta)
3199 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3200 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3201 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3202 28672, 40960, 57344, 81920, 114688, 163840, 229376
3207 for (i = 0; i < NMTUS; ++i) {
3208 unsigned int mtu = mtus[i];
3209 unsigned int log2 = fls(mtu);
3211 if (!(mtu & ((1 << log2) >> 2))) /* round */
3213 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3214 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3216 for (w = 0; w < NCCTRL_WIN; ++w) {
3219 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3222 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3223 (w << 16) | (beta[w] << 13) | inc);
/*
 * NOTE(review): the range checks / -ERANGE returns implied by the clamp
 * conditions (after original lines 3249 and 3251) and braces are elided from
 * this listing. Converts microsecond pace values to DACK ticks with rounding,
 * then programs them into A_TP_PACE_TABLE starting at @start.
 */
3229 * t4_set_pace_tbl - set the pace table
3230 * @adap: the adapter
3231 * @pace_vals: the pace values in microseconds
3232 * @start: index of the first entry in the HW pace table to set
3233 * @n: how many entries to set
3235 * Sets (a subset of the) HW pace table.
3237 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3238 unsigned int start, unsigned int n)
3240 unsigned int vals[NTX_SCHED], i;
3241 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3246 /* convert values from us to dack ticks, rounding to closest value */
3247 for (i = 0; i < n; i++, pace_vals++) {
3248 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3249 if (vals[i] > 0x7ff)
3251 if (*pace_vals && vals[i] == 0)
3254 for (i = 0; i < n; i++, start++)
3255 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
/*
 * NOTE(review): the tps/v computations inside the search loop (original
 * lines 3276, 3279), the best-candidate bookkeeping (3282-3285), and the
 * odd/even-scheduler branch around line 3295 are elided from this listing.
 * Visible behavior: searches (cpt, bpt) pairs for the closest achievable
 * byte rate, then read-modify-writes the selected pair into the scheduler's
 * half of the TP_TM rate-limit word.
 */
3260 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3261 * @adap: the adapter
3262 * @kbps: target rate in Kbps
3263 * @sched: the scheduler index
3265 * Configure a Tx HW scheduler for the target rate.
3267 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3269 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3270 unsigned int clk = adap->params.vpd.cclk * 1000;
3271 unsigned int selected_cpt = 0, selected_bpt = 0;
3274 kbps *= 125; /* -> bytes */
3275 for (cpt = 1; cpt <= 255; cpt++) {
3277 bpt = (kbps + tps / 2) / tps;
3278 if (bpt > 0 && bpt <= 255) {
3280 delta = v >= kbps ? v - kbps : kbps - v;
3281 if (delta < mindelta) {
3286 } else if (selected_cpt)
3292 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3293 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3294 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3296 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3298 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3299 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * NOTE(review): the odd/even-scheduler branch condition (around original
 * line 3323), the -EINVAL return, and braces are elided from this listing.
 * Converts the requested IPG (tenths of ns) to core clocks, bounds-checks
 * it, and read-modify-writes the correct Q0/Q1 half of the TP_TM timer
 * separator word, flushing with a readback.
 */
3304 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3305 * @adap: the adapter
3306 * @sched: the scheduler index
3307 * @ipg: the interpacket delay in tenths of nanoseconds
3309 * Set the interpacket delay for a HW packet rate scheduler.
3311 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3313 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3315 /* convert ipg to nearest number of core clocks */
3316 ipg *= core_ticks_per_usec(adap);
3317 ipg = (ipg + 5000) / 10000;
3318 if (ipg > M_TXTIMERSEPQ0)
3321 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3322 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3324 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3326 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3327 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3328 t4_read_reg(adap, A_TP_TM_PIO_DATA);
/*
 * NOTE(review): the odd/even-scheduler shift selection (original lines
 * 3350-3351, 3365-3367), the cpt extraction, and braces are elided from
 * this listing. Reads back the rate-limit and timer-separator words for
 * @sched and converts them to Kbps and tenths-of-ns IPG.
 */
3333 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3334 * @adap: the adapter
3335 * @sched: the scheduler index
3336 * @kbps: the byte rate in Kbps
3337 * @ipg: the interpacket delay in tenths of nanoseconds
3339 * Return the current configuration of a HW Tx scheduler.
3341 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3344 unsigned int v, addr, bpt, cpt;
3347 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3348 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3349 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3352 bpt = (v >> 8) & 0xff;
3355 *kbps = 0; /* scheduler disabled */
3357 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3358 *kbps = (v * bpt) / 125;
3362 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3363 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3364 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3368 *ipg = (10000 * v) / core_ticks_per_usec(adap);
/*
 * NOTE(review): braces and blank lines elided in this listing. chan_rate()
 * scales a bytes256 count by the core clock per the 62.5 * cclk_kHz formula
 * in its comment; t4_get_chan_txrate() decodes A_TP_TX_TRATE/A_TP_TX_ORATE
 * into per-channel NIC and offload byte rates.
 */
3373 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3374 * clocks. The formula is
3376 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3378 * which is equivalent to
3380 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3382 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3384 u64 v = bytes256 * adap->params.vpd.cclk;
3386 return v * 62 + v / 2;
3390 * t4_get_chan_txrate - get the current per channel Tx rates
3391 * @adap: the adapter
3392 * @nic_rate: rates for NIC traffic
3393 * @ofld_rate: rates for offloaded traffic
3395 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3398 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3402 v = t4_read_reg(adap, A_TP_TX_TRATE);
3403 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3404 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3405 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3406 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3408 v = t4_read_reg(adap, A_TP_TX_ORATE);
3409 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3410 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3411 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3412 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3416 * t4_set_trace_filter - configure one of the tracing filters
3417 * @adap: the adapter
3418 * @tp: the desired trace filter parameters
3419 * @idx: which filter to configure
3420 * @enable: whether to enable or disable the filter
3422 * Configures one of the tracing filters available in HW. If @enable is
3423 * %0 @tp is not examined and may be %NULL. The user is responsible to
3424 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
3425 * by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
 3426 * docs/readme.txt for a complete description of how to set up tracing on
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;
	u32 multitrc = F_TRCMULTIFILTER;

	/* Disabling: clearing CTL_A is all that is needed. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * capture-size limits below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
	/* Validate every field against its hardware field width. */
	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Per-filter match/don't-care register banks are a fixed stride apart. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* HW stores don't-care bits, so the mask is written inverted. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Writing CTL_A with F_TFEN last (re)starts the tracer. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3496 * t4_get_trace_filter - query one of the tracing filters
3497 * @adap: the adapter
3498 * @tp: the current trace filter parameters
3499 * @idx: which trace filter to query
3500 * @enabled: non-zero if the filter is enabled
3502 * Returns the current settings of one of the HW tracing filters.
3504 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3508 int i, ofst = idx * 4;
3509 u32 data_reg, mask_reg;
3511 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3512 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3514 *enabled = !!(ctla & F_TFEN);
3515 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3516 tp->min_len = G_TFMINPKTSIZE(ctlb);
3517 tp->skip_ofst = G_TFOFFSET(ctla);
3518 tp->skip_len = G_TFLENGTH(ctla);
3519 tp->invert = !!(ctla & F_TFINVERTMATCH);
3520 tp->port = G_TFPORT(ctla);
3522 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3523 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3524 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3526 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3527 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3528 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3533 * t4_pmtx_get_stats - returns the HW stats from PMTX
3534 * @adap: the adapter
3535 * @cnt: where to store the count statistics
3536 * @cycles: where to store the cycle statistics
3538 * Returns performance statistics from PMTX.
3540 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3544 for (i = 0; i < PM_NSTATS; i++) {
3545 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3546 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3547 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3552 * t4_pmrx_get_stats - returns the HW stats from PMRX
3553 * @adap: the adapter
3554 * @cnt: where to store the count statistics
3555 * @cycles: where to store the cycle statistics
3557 * Returns performance statistics from PMRX.
3559 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3563 for (i = 0; i < PM_NSTATS; i++) {
3564 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3565 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3566 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3571 * get_mps_bg_map - return the buffer groups associated with a port
3572 * @adap: the adapter
3573 * @idx: the port index
3575 * Returns a bitmap indicating which MPS buffer groups are associated
3576 * with the given port. Bit i is set if buffer group i is used by the
3579 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3581 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3584 return idx == 0 ? 0xf : 0;
3586 return idx < 2 ? (3 << (2 * idx)) : 0;
3591 * t4_get_port_stats_offset - collect port stats relative to a previous
3593 * @adap: The adapter
3595 * @stats: Current stats to fill
3596 * @offset: Previous stats snapshot
3598 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3599 struct port_stats *stats,
3600 struct port_stats *offset)
3605 t4_get_port_stats(adap, idx, stats);
3606 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3607 i < (sizeof(struct port_stats)/sizeof(u64)) ;
3613 * t4_get_port_stats - collect port statistics
3614 * @adap: the adapter
3615 * @idx: the port index
3616 * @p: the stats structure to fill
3618 * Collect statistics related to the given port from HW.
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
	u32 bgmap = get_mps_bg_map(adap, idx);

/* Read one 64-bit per-port MPS statistic (address of the low word). */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
/* Read one 64-bit common (not per-port) MPS statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	/* Tx counters. */
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	/* Tx frame-size histogram. */
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	/* Per-priority (PPP) Tx pause counters. */
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/* Rx counters. */
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	/* Rx error counters. */
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	/* Rx frame-size histogram. */
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	/* Per-priority (PPP) Rx pause counters. */
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* Buffer-group counters only apply to groups this port owns. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3694 * t4_clr_port_stats - clear port statistics
3695 * @adap: the adapter
3696 * @idx: the port index
3698 * Clear HW statistics for the given port.
3700 void t4_clr_port_stats(struct adapter *adap, int idx)
3703 u32 bgmap = get_mps_bg_map(adap, idx);
3705 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3706 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3707 t4_write_reg(adap, PORT_REG(idx, i), 0);
3708 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3709 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3710 t4_write_reg(adap, PORT_REG(idx, i), 0);
3711 for (i = 0; i < 4; i++)
3712 if (bgmap & (1 << i)) {
3714 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3716 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3721 * t4_get_lb_stats - collect loopback port statistics
3722 * @adap: the adapter
3723 * @idx: the loopback port index
3724 * @p: the stats structure to fill
3726 * Return HW statistics for the given loopback port.
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
	u32 bgmap = get_mps_bg_map(adap, idx);

/* Read one 64-bit loopback-port MPS statistic (low word address). */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
/* Read one 64-bit common (not per-port) MPS statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	/* Basic frame/byte counters. */
	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	/* Frame-size histogram. */
	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	/* Drop counter is a plain 32-bit register, not an L/H pair. */
	p->drop = t4_read_reg(adap, PORT_REG(idx,
					     A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));

	/* Buffer-group counters only apply to groups this port owns. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3767 * t4_wol_magic_enable - enable/disable magic packet WoL
3768 * @adap: the adapter
3769 * @port: the physical port index
3770 * @addr: MAC address expected in magic packets, %NULL to disable
3772 * Enables/disables magic packet wake-on-LAN for the selected port.
3774 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3778 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3779 (addr[2] << 24) | (addr[3] << 16) |
3780 (addr[4] << 8) | addr[5]);
3781 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3782 (addr[0] << 8) | addr[1]);
3784 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3785 V_MAGICEN(addr != NULL));
3789 * t4_wol_pat_enable - enable/disable pattern-based WoL
3790 * @adap: the adapter
3791 * @port: the physical port index
3792 * @map: bitmap of which HW pattern filters to set
3793 * @mask0: byte mask for bytes 0-63 of a packet
3794 * @mask1: byte mask for bytes 64-127 of a packet
3795 * @crc: Ethernet CRC for selected bytes
3796 * @enable: enable/disable switch
3798 * Sets the pattern filters indicated in @map to mask out the bytes
3799 * specified in @mask0/@mask1 in received packets and compare the CRC of
3800 * the resulting packet against @crc. If @enable is %true pattern-based
3801 * WoL is enabled, otherwise disabled.
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
	/* Disabling just clears the pattern-enable bit. */
	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),

/* Per-port EPIO (pattern memory) register shorthand. */
#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)

	/* Stage the upper 96 bits of the 128-bit byte mask. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	/* Program each pattern slot selected in @map. */
	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		/* NOTE(review): BUSY after the flush read indicates failure */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
		/* CRC slots live 32 entries above the mask slots. */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)

	/* Finally turn pattern matching on. */
	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3847 * t4_mk_filtdelwr - create a delete filter WR
3848 * @ftid: the filter ID
3849 * @wr: the filter work request to populate
3850 * @qid: ingress queue to receive the delete notification
3852 * Creates a filter work request to delete the supplied filter. If @qid is
3853 * negative the delete notification is suppressed.
3855 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3857 memset(wr, 0, sizeof(*wr));
3858 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3859 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3860 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3861 V_FW_FILTER_WR_NOREPLY(qid < 0));
3862 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3864 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
/*
 * INIT_CMD - fill in the common header of a firmware command.
 * @var: the command structure (also used for FW_LEN16 sizing)
 * @cmd: command name fragment, expands to FW_<cmd>_CMD
 * @rd_wr: READ or WRITE, expands to the matching request flag
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
3873 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
3875 struct fw_ldst_cmd c;
3877 memset(&c, 0, sizeof(c));
3878 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3879 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
3880 c.cycles_to_len16 = htonl(FW_LEN16(c));
3881 c.u.addrval.addr = htonl(addr);
3882 c.u.addrval.val = htonl(val);
3884 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3888 * t4_mdio_rd - read a PHY register through MDIO
3889 * @adap: the adapter
3890 * @mbox: mailbox to use for the FW command
3891 * @phy_addr: the PHY address
3892 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3893 * @reg: the register to read
3894 * @valp: where to store the value
3896 * Issues a FW command through the given mailbox to read a PHY register.
3898 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3899 unsigned int mmd, unsigned int reg, unsigned int *valp)
3902 struct fw_ldst_cmd c;
3904 memset(&c, 0, sizeof(c));
3905 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3906 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3907 c.cycles_to_len16 = htonl(FW_LEN16(c));
3908 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3909 V_FW_LDST_CMD_MMD(mmd));
3910 c.u.mdio.raddr = htons(reg);
3912 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3914 *valp = ntohs(c.u.mdio.rval);
3919 * t4_mdio_wr - write a PHY register through MDIO
3920 * @adap: the adapter
3921 * @mbox: mailbox to use for the FW command
3922 * @phy_addr: the PHY address
3923 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3924 * @reg: the register to write
3925 * @valp: value to write
3927 * Issues a FW command through the given mailbox to write a PHY register.
3929 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3930 unsigned int mmd, unsigned int reg, unsigned int val)
3932 struct fw_ldst_cmd c;
3934 memset(&c, 0, sizeof(c));
3935 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3936 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3937 c.cycles_to_len16 = htonl(FW_LEN16(c));
3938 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3939 V_FW_LDST_CMD_MMD(mmd));
3940 c.u.mdio.raddr = htons(reg);
3941 c.u.mdio.rval = htons(val);
3943 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3947 * t4_sge_ctxt_flush - flush the SGE context cache
3948 * @adap: the adapter
3949 * @mbox: mailbox to use for the FW command
3951 * Issues a FW command through the given mailbox to flush the
3952 * SGE context cache.
3954 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
3957 struct fw_ldst_cmd c;
3959 memset(&c, 0, sizeof(c));
3960 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3962 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
3963 c.cycles_to_len16 = htonl(FW_LEN16(c));
3964 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
3966 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3971 * t4_sge_ctxt_rd - read an SGE context through FW
3972 * @adap: the adapter
3973 * @mbox: mailbox to use for the FW command
3974 * @cid: the context id
3975 * @ctype: the context type
3976 * @data: where to store the context data
3978 * Issues a FW command through the given mailbox to read an SGE context.
3980 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3981 enum ctxt_type ctype, u32 *data)
3984 struct fw_ldst_cmd c;
3986 if (ctype == CTXT_EGRESS)
3987 ret = FW_LDST_ADDRSPC_SGE_EGRC;
3988 else if (ctype == CTXT_INGRESS)
3989 ret = FW_LDST_ADDRSPC_SGE_INGC;
3990 else if (ctype == CTXT_FLM)
3991 ret = FW_LDST_ADDRSPC_SGE_FLMC;
3993 ret = FW_LDST_ADDRSPC_SGE_CONMC;
3995 memset(&c, 0, sizeof(c));
3996 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3997 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3998 c.cycles_to_len16 = htonl(FW_LEN16(c));
3999 c.u.idctxt.physid = htonl(cid);
4001 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4003 data[0] = ntohl(c.u.idctxt.ctxt_data0);
4004 data[1] = ntohl(c.u.idctxt.ctxt_data1);
4005 data[2] = ntohl(c.u.idctxt.ctxt_data2);
4006 data[3] = ntohl(c.u.idctxt.ctxt_data3);
4007 data[4] = ntohl(c.u.idctxt.ctxt_data4);
4008 data[5] = ntohl(c.u.idctxt.ctxt_data5);
4014 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4015 * @adap: the adapter
4016 * @cid: the context id
4017 * @ctype: the context type
4018 * @data: where to store the context data
4020 * Reads an SGE context directly, bypassing FW. This is only for
4021 * debugging when FW is unavailable.
4023 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4028 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4029 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4031 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4032 *data++ = t4_read_reg(adap, i);
4037 * t4_fw_hello - establish communication with FW
4038 * @adap: the adapter
4039 * @mbox: mailbox to use for the FW command
4040 * @evt_mbox: mailbox to receive async FW events
4041 * @master: specifies the caller's willingness to be the device master
4042 * @state: returns the current device state (if non-NULL)
4044 * Issues a command to establish communication with FW. Returns either
4045 * an error (negative integer) or the mailbox of the Master PF.
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
	struct fw_hello_cmd c;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

	/* Encode our mastership preference and async-event mailbox. */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = htonl(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
			M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)

	/* Decode the reply: master mailbox and device state. */
	v = ntohl(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (v & F_FW_HELLO_CMD_ERR)
		*state = DEV_STATE_ERR;
	else if (v & F_FW_HELLO_CMD_INIT)
		*state = DEV_STATE_INIT;
	*state = DEV_STATE_UNINIT;

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */

		/*
		 * If neither Error nor Initialized are indicated
		 * by the firmware keep waiting till we exhaust our
		 * timeout ... and then retry if we haven't exhausted
		 * our retries.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {

		/*
		 * We either have an Error or Initialized condition
		 * report errors preferentially.
		 */
		if (pcie_fw & F_PCIE_FW_ERR)
			*state = DEV_STATE_ERR;
		else if (pcie_fw & F_PCIE_FW_INIT)
			*state = DEV_STATE_INIT;

		/*
		 * If we arrived before a Master PF was selected and
		 * there's not a valid Master PF, grab its identity
		 * from the PCIE_FW register.
		 */
		if (master_mbox == M_PCIE_FW_MASTER &&
		    (pcie_fw & F_PCIE_FW_MASTER_VLD))
			master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4163 * t4_fw_bye - end communication with FW
4164 * @adap: the adapter
4165 * @mbox: mailbox to use for the FW command
4167 * Issues a command to terminate communication with FW.
4169 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4171 struct fw_bye_cmd c;
4173 memset(&c, 0, sizeof(c));
4174 INIT_CMD(c, BYE, WRITE);
4175 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4179 * t4_fw_reset - issue a reset to FW
4180 * @adap: the adapter
4181 * @mbox: mailbox to use for the FW command
4182 * @reset: specifies the type of reset to perform
4184 * Issues a reset command of the specified type to FW.
4186 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4188 struct fw_reset_cmd c;
4190 memset(&c, 0, sizeof(c));
4191 INIT_CMD(c, RESET, WRITE);
4192 c.val = htonl(reset);
4193 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4197 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4198 * @adap: the adapter
4199 * @mbox: mailbox to use for the FW RESET command (if desired)
4200 * @force: force uP into RESET even if FW RESET command fails
4202 * Issues a RESET command to firmware (if desired) with a HALT indication
4203 * and then puts the microprocessor into RESET state. The RESET command
4204 * will only be issued if a legitimate mailbox is provided (mbox <=
4205 * M_PCIE_FW_MASTER).
4207 * This is generally used in order for the host to safely manipulate the
4208 * adapter without fear of conflicting with whatever the firmware might
4209 * be doing. The only way out of this state is to RESTART the firmware
4212 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4217 * If a legitimate mailbox is provided, issue a RESET command
4218 * with a HALT indication.
4220 if (mbox <= M_PCIE_FW_MASTER) {
4221 struct fw_reset_cmd c;
4223 memset(&c, 0, sizeof(c));
4224 INIT_CMD(c, RESET, WRITE);
4225 c.val = htonl(F_PIORST | F_PIORSTMODE);
4226 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4227 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4231 * Normally we won't complete the operation if the firmware RESET
4232 * command fails but if our caller insists we'll go ahead and put the
4233 * uP into RESET. This can be useful if the firmware is hung or even
4234 * missing ... We'll have to take the risk of putting the uP into
4235 * RESET without the cooperation of firmware in that case.
4237 * We also force the firmware's HALT flag to be on in case we bypassed
4238 * the firmware RESET command above or we're dealing with old firmware
4239 * which doesn't have the HALT capability. This will serve as a flag
4240 * for the incoming firmware to know that it's coming out of a HALT
4241 * rather than a RESET ... if it's new enough to understand that ...
4243 if (ret == 0 || force) {
4244 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4245 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4249 * And we always return the result of the firmware RESET command
4250 * even when we force the uP into RESET ...
4256 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4257 * @adap: the adapter
4258 * @reset: if we want to do a RESET to restart things
4260 * Restart firmware previously halted by t4_fw_halt(). On successful
4261 * return the previous PF Master remains as the new PF Master and there
4262 * is no need to issue a new HELLO command, etc.
4264 * We do this in two ways:
4266 * 1. If we're dealing with newer firmware we'll simply want to take
4267 * the chip's microprocessor out of RESET. This will cause the
4268 * firmware to start up from its start vector. And then we'll loop
4269 * until the firmware indicates it's started again (PCIE_FW.HALT
4270 * reset to 0) or we timeout.
4272 * 2. If we're dealing with older firmware then we'll need to RESET
4273 * the chip since older firmware won't recognize the PCIE_FW.HALT
4274 * flag and automatically RESET itself on startup.
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
	/*
	 * Since we're directing the RESET instead of the firmware
	 * doing it automatically, we need to clear the PCIE_FW.HALT
	 * flag ourselves.
	 */
	t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

	/*
	 * If we've been given a valid mailbox, first try to get the
	 * firmware to do the RESET.  If that works, great and we can
	 * return success.  Otherwise, if we haven't been given a
	 * valid mailbox or the RESET command failed, fall back to
	 * hitting the chip with a hammer.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		/* Take the uP out of RESET first so it can process the cmd. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		if (t4_fw_reset(adap, mbox,
				F_PIORST | F_PIORSTMODE) == 0)
	/* Hammer path: full PIO reset of the chip. */
	t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
	/* Newer firmware: release the uP and poll for HALT to clear. */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4319 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4320 * @adap: the adapter
4321 * @mbox: mailbox to use for the FW RESET command (if desired)
4322 * @fw_data: the firmware image to write
4324 * @force: force upgrade even if firmware doesn't cooperate
4326 * Perform all of the steps necessary for upgrading an adapter's
4327 * firmware image. Normally this requires the cooperation of the
4328 * existing firmware in order to halt all existing activities
4329 * but if an invalid mailbox token is passed in we skip that step
4330 * (though we'll still put the adapter microprocessor into RESET in
4333 * On successful return the new firmware will have been loaded and
4334 * the adapter will have been fully RESET losing all previous setup
4335 * state. On unsuccessful return the adapter may be completely hosed ...
4336 * positive errno indicates that the adapter is ~probably~ intact, a
4337 * negative errno indicates that things are looking bad ...
4339 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4340 const u8 *fw_data, unsigned int size, int force)
4342 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4345 ret = t4_fw_halt(adap, mbox, force);
4346 if (ret < 0 && !force)
4349 ret = t4_load_fw(adap, fw_data, size);
4354 * Older versions of the firmware don't understand the new
4355 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4356 * restart. So for newly loaded older firmware we'll have to do the
4357 * RESET for it so it starts up on a clean slate. We can tell if
4358 * the newly loaded firmware will handle this right by checking
4359 * its header flags to see if it advertises the capability.
4361 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4362 return t4_fw_restart(adap, mbox, reset);
4366 * t4_fw_initialize - ask FW to initialize the device
4367 * @adap: the adapter
4368 * @mbox: mailbox to use for the FW command
4370 * Issues a command to FW to partially initialize the device. This
4371 * performs initialization that generally doesn't depend on user input.
4373 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4375 struct fw_initialize_cmd c;
4377 memset(&c, 0, sizeof(c));
4378 INIT_CMD(c, INITIALIZE, WRITE);
4379 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4383 * t4_query_params - query FW or device parameters
4384 * @adap: the adapter
4385 * @mbox: mailbox to use for the FW command
4388 * @nparams: the number of parameters
4389 * @params: the parameter names
4390 * @val: the parameter values
4392 * Reads the value of FW or device parameters. Up to 7 parameters can be
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
	struct fw_params_cmd c;
	/* Each param slot is a (mnem, val) pair of big-endian words. */
	__be32 *p = &c.param[0].mnem;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
			    V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));

	/* Fill the mnemonic of every slot; p steps over the val word. */
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	/* Copy the returned values out of the reply's val words. */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4423 * t4_set_params - sets FW or device parameters
4424 * @adap: the adapter
4425 * @mbox: mailbox to use for the FW command
4428 * @nparams: the number of parameters
4429 * @params: the parameter names
4430 * @val: the parameter values
4432 * Sets the value of FW or device parameters. Up to 7 parameters can be
4433 * specified at once.
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
	struct fw_params_cmd c;
	/* Each param slot is a (mnem, val) pair of big-endian words. */
	__be32 *p = &c.param[0].mnem;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
			    V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));

	/* Write mnemonic and value for each parameter slot. */
	*p++ = htonl(*params++);
	*p++ = htonl(*val++);

	/* No reply payload is needed for a set. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4460 * t4_cfg_pfvf - configure PF/VF resource limits
4461 * @adap: the adapter
4462 * @mbox: mailbox to use for the FW command
4463 * @pf: the PF being configured
4464 * @vf: the VF being configured
4465 * @txq: the max number of egress queues
4466 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4467 * @rxqi: the max number of interrupt-capable ingress queues
4468 * @rxq: the max number of interruptless ingress queues
4469 * @tc: the PCI traffic class
4470 * @vi: the max number of virtual interfaces
4471 * @cmask: the channel access rights mask for the PF/VF
4472 * @pmask: the port access rights mask for the PF/VF
4473 * @nexact: the maximum number of exact MPS filters
4474 * @rcaps: read capabilities
4475 * @wxcaps: write/execute capabilities
4477 * Configures resource limits and capabilities for a physical or virtual
4480 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4481 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4482 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4483 unsigned int vi, unsigned int cmask, unsigned int pmask,
4484 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4486 struct fw_pfvf_cmd c;
4488 memset(&c, 0, sizeof(c));
4489 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4490 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4491 V_FW_PFVF_CMD_VFN(vf));
4492 c.retval_len16 = htonl(FW_LEN16(c));
4493 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4494 V_FW_PFVF_CMD_NIQ(rxq));
4495 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4496 V_FW_PFVF_CMD_PMASK(pmask) |
4497 V_FW_PFVF_CMD_NEQ(txq));
4498 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4499 V_FW_PFVF_CMD_NEXACTF(nexact));
4500 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4501 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4502 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4503 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4507 * t4_alloc_vi_func - allocate a virtual interface
4508 * @adap: the adapter
4509 * @mbox: mailbox to use for the FW command
4510 * @port: physical port associated with the VI
4511 * @pf: the PF owning the VI
4512 * @vf: the VF owning the VI
4513 * @nmac: number of MAC addresses needed (1 to 5)
4514 * @mac: the MAC addresses of the VI
4515 * @rss_size: size of RSS table slice associated with this VI
4516 * @portfunc: which Port Application Function MAC Address is desired
4517 * @idstype: Intrusion Detection Type
4519 * Allocates a virtual interface for the given physical port. If @mac is
4520 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4521 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4522 * stored consecutively so the space needed is @nmac * 6 bytes.
4523 * Returns a negative error number or the non-negative VI id.
4525 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4526 unsigned int port, unsigned int pf, unsigned int vf,
4527 unsigned int nmac, u8 *mac, unsigned int *rss_size,
4528 unsigned int portfunc, unsigned int idstype)
/* NOTE(review): the local declarations (ret, struct fw_vi_cmd c) are
 * elided in this listing. */
4533 memset(&c, 0, sizeof(c));
4534 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4535 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4536 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4537 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4538 c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4539 V_FW_VI_CMD_FUNC(portfunc));
4540 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
/* &c doubles as the reply buffer; the error check on ret is elided. */
4543 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4548 memcpy(mac, c.mac, sizeof(c.mac));
/* The extra-MAC copies below are presumably the fall-through cases of a
 * switch (nmac) in the full source (offsets 24/18/12/6 = slots 4..1) --
 * the switch/case scaffolding is elided here; verify before editing. */
4551 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4553 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4555 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4557 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4561 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
/* htons here performs the same 16-bit byte swap as ntohs would; it
 * converts the big-endian viid field before extracting the VI id. */
4562 return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
4566 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4567 * @adap: the adapter
4568 * @mbox: mailbox to use for the FW command
4569 * @port: physical port associated with the VI
4570 * @pf: the PF owning the VI
4571 * @vf: the VF owning the VI
4572 * @nmac: number of MAC addresses needed (1 to 5)
4573 * @mac: the MAC addresses of the VI
4574 * @rss_size: size of RSS table slice associated with this VI
4576 * backwards compatible and convenience routine to allocate a Virtual
4577 * Interface with a Ethernet Port Application Function and Intrusion
4578 * Detection System disabled.
4580 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4581 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4582 unsigned int *rss_size)
/* Thin wrapper over t4_alloc_vi_func(); the call's trailing arguments
 * (the Ethernet portfunc and a zero idstype, per the doc above) are on
 * a continuation line elided from this listing -- verify. */
4584 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4589 * t4_free_vi - free a virtual interface
4590 * @adap: the adapter
4591 * @mbox: mailbox to use for the FW command
4592 * @pf: the PF owning the VI
4593 * @vf: the VF owning the VI
4594 * @viid: virtual interface identifier
4596 * Free a previously allocated virtual interface.
4598 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4599 unsigned int vf, unsigned int viid)
/* NOTE(review): the local declaration (struct fw_vi_cmd c) is elided. */
4603 memset(&c, 0, sizeof(c));
4604 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
/* The request/exec flag lines between the opcode and the PFN field are
 * elided from this listing -- verify against the full source. */
4607 V_FW_VI_CMD_PFN(pf) |
4608 V_FW_VI_CMD_VFN(vf));
4609 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4610 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
/* &c doubles as the reply buffer. */
4612 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4616 * t4_set_rxmode - set Rx properties of a virtual interface
4617 * @adap: the adapter
4618 * @mbox: mailbox to use for the FW command
* NOTE(review): the @viid kernel-doc line is elided in this listing.
4620 * @mtu: the new MTU or -1
4621 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4622 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4623 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4624 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4625 * @sleep_ok: if true we may sleep while awaiting command completion
4627 * Sets Rx properties of a virtual interface.
4629 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4630 int mtu, int promisc, int all_multi, int bcast, int vlanex,
/* NOTE(review): the final parameter (bool sleep_ok) is on a continuation
 * line elided from this listing. */
4633 struct fw_vi_rxmode_cmd c;
4635 /* convert to FW values */
/* Each assignment below maps the -1 "no change" sentinel to the field's
 * all-ones mask; the `if (x < 0)` guard preceding each one is elided
 * from this listing -- verify against the full source. */
4637 mtu = M_FW_VI_RXMODE_CMD_MTU;
4639 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4641 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4643 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4645 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4647 memset(&c, 0, sizeof(c));
4648 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4649 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4650 c.retval_len16 = htonl(FW_LEN16(c));
4651 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4652 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4653 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4654 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4655 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4656 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4660 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4661 * @adap: the adapter
4662 * @mbox: mailbox to use for the FW command
* NOTE(review): the @viid kernel-doc line is elided in this listing.
4664 * @free: if true any existing filters for this VI id are first removed
4665 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4666 * @addr: the MAC address(es)
4667 * @idx: where to store the index of each allocated filter
4668 * @hash: pointer to hash address filter bitmap
4669 * @sleep_ok: call is allowed to sleep
4671 * Allocates an exact-match filter for each of the supplied addresses and
4672 * sets it to the corresponding address. If @idx is not %NULL it should
4673 * have at least @naddr entries, each of which will be set to the index of
4674 * the filter allocated for the corresponding MAC address. If a filter
4675 * could not be allocated for an address its index is set to 0xffff.
4676 * If @hash is not %NULL addresses that fail to allocate an exact filter
4677 * are hashed and update the hash filter bitmap pointed at by @hash.
4679 * Returns a negative error number or the number of filters allocated.
4681 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4682 unsigned int viid, bool free, unsigned int naddr,
4683 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4685 int offset, ret = 0;
4686 struct fw_vi_mac_cmd c;
4687 unsigned int nfilters = 0;
4688 unsigned int rem = naddr;
/* Reject requests larger than the MPS TCAM; the return statement for
 * this guard is elided from this listing. */
4690 if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
/* Process the address list in batches of at most ARRAY_SIZE(c.u.exact)
 * entries per mailbox command. */
4693 for (offset = 0; offset < naddr ; /**/) {
4694 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4696 : ARRAY_SIZE(c.u.exact));
/* Command length in 16-byte units covers only the exact[] slots used. */
4697 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4698 u.exact[fw_naddr]), 16);
4699 struct fw_vi_mac_exact *p;
4702 memset(&c, 0, sizeof(c));
4703 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
/* NOTE(review): request/write flag lines are elided here. */
4706 V_FW_CMD_EXEC(free) |
4707 V_FW_VI_MAC_CMD_VIID(viid));
4708 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4709 V_FW_CMD_LEN16(len16));
/* NOTE(review): the declaration of loop index i is elided. */
4711 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4712 p->valid_to_idx = htons(
4713 F_FW_VI_MAC_CMD_VALID |
4714 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4715 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4719 * It's okay if we run out of space in our MAC address arena.
4720 * Some of the addresses we submit may get stored so we need
4721 * to run through the reply to see what the results were ...
4723 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* Anything other than success or a tolerated -FW_ENOMEM aborts; the
 * break/goto for this branch is elided from this listing. */
4724 if (ret && ret != -FW_ENOMEM)
4727 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4728 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
/* Failed slots report an out-of-range index; record 0xffff for them
 * (the alternative branch lines are elided here). */
4731 idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
4734 if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
/* Fall back to the inexact hash filter for addresses that did not get
 * an exact-match slot. */
4737 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
/* NOTE(review): the loop bookkeeping (offset/rem updates) and the
 * nfilters accounting are elided from this listing. */
4745 if (ret == 0 || ret == -FW_ENOMEM)
4751 * t4_change_mac - modifies the exact-match filter for a MAC address
4752 * @adap: the adapter
4753 * @mbox: mailbox to use for the FW command
* NOTE(review): the @viid kernel-doc line is elided in this listing.
4755 * @idx: index of existing filter for old value of MAC address, or -1
4756 * @addr: the new MAC address value
4757 * @persist: whether a new MAC allocation should be persistent
4758 * @add_smt: if true also add the address to the HW SMT
4760 * Modifies an exact-match filter and sets it to the new MAC address if
4761 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4762 * latter case the address is added persistently if @persist is %true.
4764 * Note that in general it is not possible to modify the value of a given
4765 * filter so the generic way to modify an address filter is to free the one
4766 * being used by the old address value and allocate a new filter for the
4767 * new address value.
4769 * Returns a negative error number or the index of the filter with the new
4770 * MAC value. Note that this index may differ from @idx.
4772 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4773 int idx, const u8 *addr, bool persist, bool add_smt)
/* NOTE(review): the local declarations (ret, mode) are elided here. */
4776 struct fw_vi_mac_cmd c;
4777 struct fw_vi_mac_exact *p = c.u.exact;
4779 if (idx < 0) /* new allocation */
4780 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4781 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4783 memset(&c, 0, sizeof(c));
4784 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4785 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
/* Single exact[] entry, so a fixed LEN16 of 1 is used here. */
4786 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4787 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4788 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4789 V_FW_VI_MAC_CMD_IDX(idx));
4790 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* Non-sleeping mailbox variant; &c doubles as the reply buffer.
 * NOTE(review): the `if (ret == 0)` guard around the reply decode and
 * the out-of-range error mapping are elided from this listing. */
4792 ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4794 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4795 if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4802 * t4_set_addr_hash - program the MAC inexact-match hash filter
4803 * @adap: the adapter
4804 * @mbox: mailbox to use for the FW command
* NOTE(review): the @viid kernel-doc line is elided in this listing.
4806 * @ucast: whether the hash filter should also match unicast addresses
4807 * @vec: the value to be written to the hash filter
4808 * @sleep_ok: call is allowed to sleep
4810 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4812 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4813 bool ucast, u64 vec, bool sleep_ok)
4815 struct fw_vi_mac_cmd c;
4817 memset(&c, 0, sizeof(c));
/* NOTE(review): V_FW_VI_ENABLE_CMD_VIID is used inside a FW_VI_MAC_CMD;
 * confirm it encodes the same bitfield as V_FW_VI_MAC_CMD_VIID. */
4818 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4819 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
/* NOTE(review): the LEN16 term of this expression is on a continuation
 * line elided from this listing. */
4820 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4821 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4823 c.u.hash.hashvec = cpu_to_be64(vec);
4824 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4828 * t4_enable_vi - enable/disable a virtual interface
4829 * @adap: the adapter
4830 * @mbox: mailbox to use for the FW command
4832 * @rx_en: 1=enable Rx, 0=disable Rx
4833 * @tx_en: 1=enable Tx, 0=disable Tx
4835 * Enables/disables a virtual interface.
4837 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4838 bool rx_en, bool tx_en)
4840 struct fw_vi_enable_cmd c;
4842 memset(&c, 0, sizeof(c));
4843 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4844 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4845 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4846 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4851 * t4_identify_port - identify a VI's port by blinking its LED
4852 * @adap: the adapter
4853 * @mbox: mailbox to use for the FW command
4855 * @nblinks: how many times to blink LED at 2.5 Hz
4857 * Identifies a VI's port by blinking its LED.
4859 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4860 unsigned int nblinks)
4862 struct fw_vi_enable_cmd c;
4864 memset(&c, 0, sizeof(c));
4865 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4866 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4867 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4868 c.blinkdur = htons(nblinks);
4869 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4873 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4874 * @adap: the adapter
4875 * @mbox: mailbox to use for the FW command
4876 * @start: %true to enable the queues, %false to disable them
4877 * @pf: the PF owning the queues
4878 * @vf: the VF owning the queues
4879 * @iqid: ingress queue id
4880 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4881 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4883 * Starts or stops an ingress queue and its associated FLs, if any.
4885 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4886 unsigned int pf, unsigned int vf, unsigned int iqid,
4887 unsigned int fl0id, unsigned int fl1id)
/* NOTE(review): the local declaration (struct fw_iq_cmd c) is elided
 * from this listing. */
4891 memset(&c, 0, sizeof(c));
4892 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4893 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4894 V_FW_IQ_CMD_VFN(vf));
/* Exactly one of IQSTART/IQSTOP is set, driven by @start. */
4895 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4896 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4897 c.iqid = htons(iqid);
4898 c.fl0id = htons(fl0id);
4899 c.fl1id = htons(fl1id);
4900 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4904 * t4_iq_free - free an ingress queue and its FLs
4905 * @adap: the adapter
4906 * @mbox: mailbox to use for the FW command
4907 * @pf: the PF owning the queues
4908 * @vf: the VF owning the queues
4909 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4910 * @iqid: ingress queue id
4911 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4912 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4914 * Frees an ingress queue and its associated FLs, if any.
4916 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4917 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4918 unsigned int fl0id, unsigned int fl1id)
/* NOTE(review): the local declaration (struct fw_iq_cmd c) is elided
 * from this listing. */
4922 memset(&c, 0, sizeof(c));
4923 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4924 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4925 V_FW_IQ_CMD_VFN(vf));
4926 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4927 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4928 c.iqid = htons(iqid);
4929 c.fl0id = htons(fl0id);
4930 c.fl1id = htons(fl1id);
4931 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4935 * t4_eth_eq_free - free an Ethernet egress queue
4936 * @adap: the adapter
4937 * @mbox: mailbox to use for the FW command
4938 * @pf: the PF owning the queue
4939 * @vf: the VF owning the queue
4940 * @eqid: egress queue id
4942 * Frees an Ethernet egress queue.
4944 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4945 unsigned int vf, unsigned int eqid)
4947 struct fw_eq_eth_cmd c;
4949 memset(&c, 0, sizeof(c));
4950 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4951 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4952 V_FW_EQ_ETH_CMD_VFN(vf));
4953 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4954 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4955 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4959 * t4_ctrl_eq_free - free a control egress queue
4960 * @adap: the adapter
4961 * @mbox: mailbox to use for the FW command
4962 * @pf: the PF owning the queue
4963 * @vf: the VF owning the queue
4964 * @eqid: egress queue id
4966 * Frees a control egress queue.
4968 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4969 unsigned int vf, unsigned int eqid)
4971 struct fw_eq_ctrl_cmd c;
4973 memset(&c, 0, sizeof(c));
4974 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4975 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4976 V_FW_EQ_CTRL_CMD_VFN(vf));
4977 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4978 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4979 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4983 * t4_ofld_eq_free - free an offload egress queue
4984 * @adap: the adapter
4985 * @mbox: mailbox to use for the FW command
4986 * @pf: the PF owning the queue
4987 * @vf: the VF owning the queue
4988 * @eqid: egress queue id
4990 * Frees a control egress queue.
4992 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4993 unsigned int vf, unsigned int eqid)
4995 struct fw_eq_ofld_cmd c;
4997 memset(&c, 0, sizeof(c));
4998 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4999 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5000 V_FW_EQ_OFLD_CMD_VFN(vf));
5001 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5002 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5003 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5007 * t4_handle_fw_rpl - process a FW reply message
5008 * @adap: the adapter
5009 * @rpl: start of the FW message
5011 * Processes a FW message, such as link state change messages.
5013 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The opcode is the first byte of every FW message. */
5015 u8 opcode = *(const u8 *)rpl;
5016 const struct fw_port_cmd *p = (const void *)rpl;
5017 unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5019 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5020 /* link/module state change message */
5021 int speed = 0, fc = 0, i;
5022 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5023 struct port_info *pi = NULL;
5024 struct link_config *lc;
5025 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5026 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5027 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* NOTE(review): the fc |= PAUSE_RX / PAUSE_TX bodies of the two pause
 * checks below are elided from this listing -- verify. */
5029 if (stat & F_FW_PORT_CMD_RXPAUSE)
5031 if (stat & F_FW_PORT_CMD_TXPAUSE)
/* Decode the advertised link speed; the speed assignments for the
 * 100M and 1G branches are elided from this listing. */
5033 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5035 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5037 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5038 speed = SPEED_10000;
/* Locate the port_info whose Tx channel matches the message's chan;
 * the loop's break and the lc = &pi->link_cfg setup that follow are
 * elided from this listing. */
5040 for_each_port(adap, i) {
5041 pi = adap2pinfo(adap, i);
5042 if (pi->tx_chan == chan)
5047 if (link_ok != lc->link_ok || speed != lc->speed ||
5048 fc != lc->fc) { /* something changed */
5049 lc->link_ok = link_ok;
/* NOTE(review): lc->speed/lc->fc updates are elided here. */
5052 t4_os_link_changed(adap, i, link_ok);
5054 if (mod != pi->mod_type) {
/* NOTE(review): the pi->mod_type update is elided here. */
5056 t4_os_portmod_changed(adap, i);
/* Unrecognized messages are only logged (rate-limited); the else and
 * return statements around this are elided from this listing. */
5059 CH_WARN_RATELIMIT(adap,
5060 "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5067 * get_pci_mode - determine a card's PCI mode
5068 * @adapter: the adapter
5069 * @p: where to store the PCI settings
5071 * Determines a card's PCI mode and associated parameters, such as speed
* and width, from the PCIe capability's Link Status register.
5074 static void __devinit get_pci_mode(struct adapter *adapter,
5075 struct pci_params *p)
/* NOTE(review): local declarations (pcie_cap, val) and the guard that
 * skips the reads when no PCIe capability is found are elided from this
 * listing -- verify against the full source. */
5080 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5082 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
/* Current Link Speed field of PCI_EXP_LNKSTA. */
5083 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* Negotiated Link Width field, shifted down to a lane count. */
5084 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5089 * init_link_config - initialize a link's SW state
5090 * @lc: structure holding the link state
5091 * @caps: link capabilities
5093 * Initializes the SW state maintained for each link, including the link's
5094 * capabilities and default speed/flow-control/autonegotiation settings.
5096 static void __devinit init_link_config(struct link_config *lc,
/* NOTE(review): the second signature line (the caps parameter) is
 * elided from this listing. */
5099 lc->supported = caps;
5100 lc->requested_speed = 0;
/* NOTE(review): the lc->speed initialization between these two lines is
 * elided from this listing -- verify. */
5102 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5103 if (lc->supported & FW_PORT_CAP_ANEG) {
/* Advertise only what both the mask and the hardware support. */
5104 lc->advertising = lc->supported & ADVERT_MASK;
5105 lc->autoneg = AUTONEG_ENABLE;
/* Let autonegotiation pick the final pause settings. */
5106 lc->requested_fc |= PAUSE_AUTONEG;
/* else-branch: no autonegotiation capability (the `} else {` line is
 * elided from this listing). */
5108 lc->advertising = 0;
5109 lc->autoneg = AUTONEG_DISABLE;
/* Wait until the device responds on PL_WHOAMI (i.e. is out of reset).
 * Returns 0 when the register reads back a valid value. */
5113 static int __devinit wait_dev_ready(struct adapter *adap)
/* NOTE(review): the local declaration, the early-success return after
 * the first read, and the delay between the two reads are elided from
 * this listing -- verify against the full source. */
5117 whoami = t4_read_reg(adap, A_PL_WHOAMI);
/* All-ones means the bus returned no data; X_CIM_PF_NOACCESS means the
 * CIM is not yet serving this PF. */
5119 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
5123 whoami = t4_read_reg(adap, A_PL_WHOAMI);
/* NOTE(review): the `? 0 : error` continuation of this return is on a
 * line elided from this listing. */
5124 return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
/* Identify the serial flash part via its JEDEC ID and record its size
 * and sector count in adapter->params. Returns 0 on success. */
5128 static int __devinit get_flash_params(struct adapter *adapter)
/* NOTE(review): local declarations (ret, info) and the per-call error
 * checks/returns are elided from this listing -- verify. */
5133 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5135 ret = sf1_read(adapter, 3, 0, 1, &info);
5136 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Low byte of the ID is the manufacturer code; 0x20 = Numonix. */
5140 if ((info & 0xff) != 0x20) /* not a Numonix flash */
5142 info >>= 16; /* log2 of size */
/* 1MB..8MB parts: 64KB sectors, so nsec = size >> 16. */
5143 if (info >= 0x14 && info < 0x18)
5144 adapter->params.sf_nsec = 1 << (info - 16);
/* 16MB part has 64 sectors. */
5145 else if (info == 0x18)
5146 adapter->params.sf_nsec = 64;
/* NOTE(review): the else/-EINVAL branch for unsupported sizes and the
 * final return are elided from this listing. */
5149 adapter->params.sf_size = 1 << info;
/* Program the PCIe completion timeout range in Device Control 2.
 * NOTE(review): this listing elides the second parameter (the timeout
 * range code), the local declarations, the capability-found guard, and
 * the lines that mask/merge the new range into val between the read and
 * the write -- as shown, the write would store val unchanged.  Verify
 * against the full source before editing. */
5153 static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5159 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5161 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5164 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5169 * t4_prep_adapter - prepare SW and HW for operation
5170 * @adapter: the adapter
* NOTE(review): the doc below mentions @reset but the visible signature
* takes no such parameter -- reconcile doc and prototype.
5171 * @reset: if true perform a HW reset
5173 * Initialize adapter SW state for the various HW modules, set initial
5174 * values for some adapter tunables, take PHYs out of reset, and
5175 * initialize the MDIO interface.
5177 int __devinit t4_prep_adapter(struct adapter *adapter)
/* NOTE(review): the local declaration of ret and the error-return
 * checks after each helper call are elided from this listing. */
5181 ret = wait_dev_ready(adapter);
5185 get_pci_mode(adapter, &adapter->params.pci);
5187 adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
5188 /* T4A1 chip is no longer supported */
5189 if (adapter->params.rev == 1) {
/* NOTE(review): the error return of this branch is elided. */
5190 CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
5193 adapter->params.pci.vpd_cap_addr =
5194 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5196 ret = get_flash_params(adapter);
5200 ret = get_vpd_params(adapter, &adapter->params.vpd);
/* Later silicon doubles the CIM logic-analyzer buffer size. */
5204 if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
5206 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5209 adapter->params.cim_la_size = CIMLA_SIZE;
5212 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5215 * Default port and clock for debugging in case we can't reach FW.
5217 adapter->params.nports = 1;
5218 adapter->params.portvec = 1;
5219 adapter->params.vpd.cclk = 50000;
5221 /* Set pci completion timeout value to 4 seconds. */
5222 set_pcie_completion_timeout(adapter, 0xd);
/* Initialize a port's SW state: query port info from FW, allocate its
 * VI, record the MAC address, and seed the link configuration. */
5226 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
/* NOTE(review): locals (i, j, ret, the addr[] MAC buffer) are partly
 * elided from this listing. */
5230 struct fw_port_cmd c;
5231 unsigned int rss_size;
5232 adapter_t *adap = p->adapter;
5234 memset(&c, 0, sizeof(c));
/* Map port_id to the j-th set bit of portvec; the do { j++; } opening
 * of this inner loop is elided from this listing -- verify. */
5236 for (i = 0, j = -1; i <= p->port_id; i++) {
5239 } while ((adap->params.portvec & (1 << j)) == 0);
5242 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5243 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5244 V_FW_PORT_CMD_PORTID(j));
5245 c.action_to_len16 = htonl(
/* NOTE(review): the FW_LEN16 term of this expression is elided. */
5246 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
/* NOTE(review): the error checks after each mailbox call are elided. */
5248 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5252 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
/* NOTE(review): assignments of the VI id, tx_chan, lport etc. between
 * these lines are elided from this listing. */
5259 p->rss_size = rss_size;
5260 t4_os_set_hw_addr(adap, p->port_id, addr);
/* ret is reused to hold the decoded lstatus word from the reply. */
5262 ret = ntohl(c.u.info.lstatus_to_modtype);
5263 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5264 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5265 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5266 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5268 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
/* Program a Tx scheduler entity (channel/class rate limiter or WRR
 * weight) via a FW_SCHED_CMD. */
5273 int t4_config_scheduler(struct adapter *adapter, int mode, int level,
5274 int pktsize, int sched_class, int port, int unit,
5275 int rate, int weight, int minrate, int maxrate)
5277 struct fw_sched_cmd cmd, rpl;
/* Basic sanity check; the -EINVAL return of this guard is elided from
 * this listing. */
5279 if (rate < 0 || unit < 0)
5282 memset(&cmd, 0, sizeof(cmd));
5283 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5284 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5285 cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(sizeof(cmd)/16));
/* sc=1 selects the "params" sub-command of FW_SCHED_CMD. */
5287 cmd.u.params.sc = 1;
5288 cmd.u.params.level = level;
5289 cmd.u.params.mode = mode;
5290 cmd.u.params.ch = port;
5291 cmd.u.params.cl = sched_class;
5292 cmd.u.params.rate = rate;
5293 cmd.u.params.unit = unit;
/* NOTE(review): the `switch (level)` header, the break statements and
 * the default branch are elided from this listing -- verify. */
5296 case FW_SCHED_PARAMS_LEVEL_CH_WRR:
5297 case FW_SCHED_PARAMS_LEVEL_CL_WRR:
/* Weighted round-robin levels only need the weight field. */
5298 cmd.u.params.weight = cpu_to_be16(weight);
5300 case FW_SCHED_PARAMS_LEVEL_CH_RL:
5301 case FW_SCHED_PARAMS_LEVEL_CL_RL:
/* Rate-limiter levels carry min/max rates and a packet size. */
5302 cmd.u.params.max = cpu_to_be32(maxrate);
5303 cmd.u.params.min = cpu_to_be32(minrate);
5304 cmd.u.params.pktsize = cpu_to_be16(pktsize);
/* Sleeping mailbox call; rpl receives the firmware's reply. */
5310 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl, 1);