2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include "ecore_spq.h"
37 #include "ecore_gtt_reg_addr.h"
38 #include "ecore_init_ops.h"
39 #include "ecore_rt_defs.h"
40 #include "ecore_int.h"
43 #include "ecore_sriov.h"
45 #include "ecore_hw_defs.h"
46 #include "ecore_hsi_common.h"
47 #include "ecore_mcp.h"
48 #include "ecore_dbg_fw_funcs.h"
51 /* This is nasty, but diag uses drv_dbg_fw_funcs.c [the non-ecore flavor],
52 * so the functions lack the ecore prefix.
53 * If other clients ever need this [or if the non-optional content there
54 * grows], we'll need to re-think this.
56 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
57 struct ecore_ptt *ptt,
59 enum dbg_attn_type attn_type,
61 struct dbg_attn_block_result *results);
63 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
64 struct dbg_attn_block_result *results);
66 const char* dbg_get_status_str(enum dbg_status status);
68 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
69 dbg_read_attn(hwfn, ptt, id, type, clear, results)
70 #define ecore_dbg_parse_attn(hwfn, results) \
71 dbg_parse_attn(hwfn, results)
72 #define ecore_dbg_get_status_str(status) \
73 dbg_get_status_str(status)
76 struct ecore_pi_info {
77 ecore_int_comp_cb_t comp_cb;
78 void *cookie; /* Will be sent to the completion callback function */
81 struct ecore_sb_sp_info {
82 struct ecore_sb_info sb_info;
83 /* per protocol index data */
84 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
87 enum ecore_attention_type {
89 ECORE_ATTN_TYPE_PARITY,
92 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
93 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
95 struct aeu_invert_reg_bit {
98 #define ATTENTION_PARITY (1 << 0)
100 #define ATTENTION_LENGTH_MASK (0x00000ff0)
101 #define ATTENTION_LENGTH_SHIFT (4)
102 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
103 ATTENTION_LENGTH_SHIFT)
104 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
105 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
106 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
109 /* Multiple bits start with this offset */
110 #define ATTENTION_OFFSET_MASK (0x000ff000)
111 #define ATTENTION_OFFSET_SHIFT (12)
113 #define ATTENTION_BB_MASK (0x00700000)
114 #define ATTENTION_BB_SHIFT (20)
115 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
116 #define ATTENTION_BB_DIFFERENT (1 << 23)
118 #define ATTENTION_CLEAR_ENABLE (1 << 28)
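/* A hedged reading of the flags word built from the masks above: bit 0 marks
 * a parity source, bits [11:4] hold how many consecutive AEU bits the entry
 * spans, bits [19:12] an optional starting offset, bits [22:20] an index into
 * the BB-specific descriptor table, and bit 28 asks that the source be masked
 * once it fires. For example, ATTENTION_PAR_INT appears to expand to
 * (2 << 4) | ATTENTION_PARITY == 0x21, i.e. a two-bit entry (interrupt +
 * parity) whose first bit is taken as the parity indication.
 */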
121 /* Callback to call if the attention is triggered */
122 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
124 enum block_id block_index;
127 struct aeu_invert_reg {
128 struct aeu_invert_reg_bit bits[32];
131 #define MAX_ATTN_GRPS (8)
132 #define NUM_ATTN_REGS (9)
134 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
136 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
138 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
140 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
143 return ECORE_SUCCESS;
146 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
147 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
148 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
149 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
150 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
151 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
152 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
153 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
154 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
155 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
156 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
167 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
168 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
169 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
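/* Taken together, these masks decode the PSWHST "details" words read below:
 * PF id, VF id, a VF-valid flag, the client id, a read/write indication and
 * (for incorrect accesses) the byte-enables, all packed into a single value.
 */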
170 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
172 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID);
174 /* Disabled VF access */
175 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
178 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
179 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
180 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
181 PSWHST_REG_VF_DISABLED_ERROR_DATA);
182 DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
183 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
184 ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
185 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
186 ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
187 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
188 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
189 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
190 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
191 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
192 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
196 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
197 PSWHST_REG_INCORRECT_ACCESS_VALID);
198 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
199 u32 addr, data, length;
201 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
202 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
203 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
204 PSWHST_REG_INCORRECT_ACCESS_DATA);
205 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
206 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
208 DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
210 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
211 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
212 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
213 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
214 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
215 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
216 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
217 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
218 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
219 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
220 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
221 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
225 /* TODO - We know 'some' of these are legal due to virtualization,
226 * but is it true for all of them?
228 return ECORE_SUCCESS;
231 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
232 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
233 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
234 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
235 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
236 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
237 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
238 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
239 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
240 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
241 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
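/* Best-effort reading of the masks above: DATA_0 carries the faulting GRC
 * address in 32-bit words (hence the "<< 2" when it is printed below as a
 * byte address), a read/write bit and the master id; DATA_1 carries the
 * PF/VF ids plus a privilege field that tells whether the VF id is relevant.
 */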
242 static const char* grc_timeout_attn_master_to_str(u8 master)
245 case 1: return "PXP";
246 case 2: return "MCP";
247 case 3: return "MSDM";
248 case 4: return "PSDM";
249 case 5: return "YSDM";
250 case 6: return "USDM";
251 case 7: return "TSDM";
252 case 8: return "XSDM";
253 case 9: return "DBU";
254 case 10: return "DMAE";
260 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
264 /* We've already cleared the timeout interrupt register, so we learn
265 * of interrupts via the validity register
267 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
268 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
269 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
272 /* Read the GRC timeout information */
273 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
274 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
275 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
276 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
278 DP_NOTICE(p_hwfn->p_dev, false,
279 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
281 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
283 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
284 grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
285 ECORE_GRC_ATTENTION_MASTER_SHIFT),
286 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
287 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
288 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
289 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
290 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
291 ECORE_GRC_ATTENTION_VF_SHIFT);
294 /* Regardless of anything else, clear the validity bit */
295 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
296 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
297 return ECORE_SUCCESS;
300 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
301 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
302 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
303 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
304 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
305 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
306 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
307 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
308 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
309 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
310 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
311 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
312 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
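/* The handler below walks the PGLUE_B latched error registers in order:
 * blocked host writes, blocked host reads, ICPL errors, master ZLR errors and
 * VF ILT errors, printing whatever is valid and finally clearing the latched
 * error indications.
 */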
314 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
315 struct ecore_ptt *p_ptt)
319 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
320 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
321 u32 addr_lo, addr_hi, details;
323 addr_lo = ecore_rd(p_hwfn, p_ptt,
324 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
325 addr_hi = ecore_rd(p_hwfn, p_ptt,
326 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
327 details = ecore_rd(p_hwfn, p_ptt,
328 PGLUE_B_REG_TX_ERR_WR_DETAILS);
330 DP_NOTICE(p_hwfn, false,
331 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
332 addr_hi, addr_lo, details,
333 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
334 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
335 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
337 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
338 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
339 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
342 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
343 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
344 u32 addr_lo, addr_hi, details;
346 addr_lo = ecore_rd(p_hwfn, p_ptt,
347 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
348 addr_hi = ecore_rd(p_hwfn, p_ptt,
349 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
350 details = ecore_rd(p_hwfn, p_ptt,
351 PGLUE_B_REG_TX_ERR_RD_DETAILS);
353 DP_NOTICE(p_hwfn, false,
354 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
355 addr_hi, addr_lo, details,
356 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
357 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
358 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
360 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
361 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
362 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
365 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
366 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
367 DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);
369 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
370 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
371 u32 addr_hi, addr_lo;
373 addr_lo = ecore_rd(p_hwfn, p_ptt,
374 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
375 addr_hi = ecore_rd(p_hwfn, p_ptt,
376 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
378 DP_NOTICE(p_hwfn, false,
379 "ICPL eror - %08x [Address %08x:%08x]\n",
380 tmp, addr_hi, addr_lo);
383 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
384 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
385 u32 addr_hi, addr_lo, details;
387 addr_lo = ecore_rd(p_hwfn, p_ptt,
388 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
389 addr_hi = ecore_rd(p_hwfn, p_ptt,
390 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
391 details = ecore_rd(p_hwfn, p_ptt,
392 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
394 DP_NOTICE(p_hwfn, false,
395 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
396 details, tmp, addr_hi, addr_lo);
399 /* Clear the indications */
400 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
402 return ECORE_SUCCESS;
405 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
407 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
410 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
412 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
414 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
419 static enum _ecore_status_t
420 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
422 DP_INFO(p_hwfn, "General attention 35!\n");
424 return ECORE_SUCCESS;
427 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
428 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
429 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
430 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
431 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
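/* The OPAQUE and SIZE fields above are extracted from the drop details with
 * GET_FIELD() further down; the "* 4" applied to SIZE there suggests the
 * hardware reports the dropped doorbell size in 32-bit words.
 */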
433 #define ECORE_DB_REC_COUNT 10
434 #define ECORE_DB_REC_INTERVAL 100
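/* Worked out: the recovery loop below polls DORQ_REG_PF_USAGE_CNT up to
 * ECORE_DB_REC_COUNT times with ECORE_DB_REC_INTERVAL usec between reads,
 * i.e. it waits at most 10 * 100 = 1000 usec for the doorbell usage counter
 * to drain before giving up with ECORE_TIMEOUT.
 */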
436 /* assumes sticky overflow indication was set for this PF */
437 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
438 struct ecore_ptt *p_ptt)
440 u8 count = ECORE_DB_REC_COUNT;
443 /* wait for usage to zero or count to run out. This is necessary since
444 * EDPM doorbell transactions can take multiple 64b cycles, and as such
445 * can "split" over the PCI. Possibly, the doorbell drop can happen with
446 * half an EDPM in the queue and the other half dropped. Another EDPM
447 * doorbell to the same address (from the doorbell recovery mechanism or
448 * from the doorbelling entity) could have its first half dropped and the
449 * second half interpreted as a continuation of the first. To prevent such
450 * malformed doorbells from reaching the device, flush the queue before
451 * releasing the overflow sticky indication.
453 while (count-- && usage) {
454 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
455 OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
458 /* should have been depleted by now */
460 DP_NOTICE(p_hwfn->p_dev, false,
461 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
462 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
463 return ECORE_TIMEOUT;
466 /* flush any pending (e)dpm as they may never arrive */
467 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
469 /* release overflow sticky indication (stop silently dropping everything) */
470 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
472 /* repeat all last doorbells (doorbell drop recovery) */
473 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
475 return ECORE_SUCCESS;
478 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
480 u32 int_sts, first_drop_reason, details, address, overflow,
482 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
483 enum _ecore_status_t rc;
485 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
486 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
489 /* int_sts may be zero since all PFs were interrupted for doorbell
490 * overflow but another one already handled it. We can abort here; if
491 * this PF also requires overflow recovery we will be interrupted again.
492 * The masked almost-full indication may also be set. Ignore it.
494 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
495 return ECORE_SUCCESS;
497 /* check if db_drop or overflow happened */
498 if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
499 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
501 /* obtain data about db drop/overflow */
502 first_drop_reason = ecore_rd(p_hwfn, p_ptt,
503 DORQ_REG_DB_DROP_REASON) &
504 ECORE_DORQ_ATTENTION_REASON_MASK;
505 details = ecore_rd(p_hwfn, p_ptt,
506 DORQ_REG_DB_DROP_DETAILS);
507 address = ecore_rd(p_hwfn, p_ptt,
508 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
509 overflow = ecore_rd(p_hwfn, p_ptt,
510 DORQ_REG_PF_OVFL_STICKY);
511 all_drops_reason = ecore_rd(p_hwfn, p_ptt,
512 DORQ_REG_DB_DROP_DETAILS_REASON);
515 DP_NOTICE(p_hwfn->p_dev, false,
516 "Doorbell drop occurred\n"
517 "Address\t\t0x%08x\t(second BAR address)\n"
518 "FID\t\t0x%04x\t\t(Opaque FID)\n"
519 "Size\t\t0x%04x\t\t(in bytes)\n"
520 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
521 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
522 "Overflow\t0x%x\t\t(a per PF indication)\n",
523 address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
524 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
525 first_drop_reason, all_drops_reason, overflow);
527 /* if this PF caused overflow, initiate recovery */
529 rc = ecore_db_rec_attn(p_hwfn, p_ptt);
530 if (rc != ECORE_SUCCESS)
534 /* clear the doorbell drop details and prepare for next drop */
535 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
537 /* mark the interrupt as handled (note: even if the drop was due to a
538 * different reason than overflow we mark it as handled)
540 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
541 DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
543 /* if there are no indications other than drop indications, success */
544 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
545 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
546 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
547 return ECORE_SUCCESS;
550 /* some other indication was present - non-recoverable */
551 DP_INFO(p_hwfn, "DORQ fatal attention\n");
556 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
559 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
560 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
563 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
564 TM_REG_INT_STS_1_PEND_CONN_SCAN))
567 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
568 TM_REG_INT_STS_1_PEND_CONN_SCAN))
569 DP_INFO(p_hwfn, "TM attention on emulation - most likely a result of clock ratios\n");
570 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
571 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
572 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
573 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
575 return ECORE_SUCCESS;
582 /* Instead of major changes to the data-structure, we have some 'special'
583 * identifiers for sources that changed meaning between adapters.
585 enum aeu_invert_reg_special_type {
586 AEU_INVERT_REG_SPECIAL_CNIG_0,
587 AEU_INVERT_REG_SPECIAL_CNIG_1,
588 AEU_INVERT_REG_SPECIAL_CNIG_2,
589 AEU_INVERT_REG_SPECIAL_CNIG_3,
590 AEU_INVERT_REG_SPECIAL_MAX,
593 static struct aeu_invert_reg_bit
594 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
595 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
596 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
597 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
598 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
601 /* Notice aeu_invert_reg must be defined in the same bit order as the HW;
602 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
605 { /* After Invert 1 */
606 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
611 { /* After Invert 2 */
612 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
613 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
614 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
615 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
616 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
617 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
618 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
619 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
620 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS},
625 { /* After Invert 3 */
626 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
631 { /* After Invert 4 */
632 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID},
633 {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
634 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID},
635 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
636 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS},
637 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
638 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS},
639 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
640 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM},
641 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
642 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM},
643 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
644 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
645 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
646 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
647 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
648 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
649 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
650 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
651 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
652 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
653 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
654 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
659 { /* After Invert 5 */
660 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
661 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
662 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
663 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
664 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
665 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
666 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
667 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
668 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
669 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
670 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
671 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
672 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
673 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
674 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
675 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
680 { /* After Invert 6 */
681 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
682 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
683 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
684 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
685 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
686 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
687 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
688 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
689 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
690 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
691 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
692 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
693 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
694 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
695 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
696 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
701 { /* After Invert 7 */
702 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
703 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
704 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
705 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
706 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
707 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
708 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
709 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
710 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
711 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
712 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
713 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
714 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
715 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
716 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
717 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
718 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
723 { /* After Invert 8 */
724 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
725 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
726 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
727 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
728 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
729 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
730 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
731 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
732 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
733 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
734 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
735 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
736 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
737 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
738 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
739 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
740 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
741 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
742 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
743 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
744 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
749 { /* After Invert 9 */
750 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
751 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
752 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
753 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
754 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
760 static struct aeu_invert_reg_bit *
761 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
762 struct aeu_invert_reg_bit *p_bit)
764 if (!ECORE_IS_BB(p_hwfn->p_dev))
767 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
770 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
774 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
775 struct aeu_invert_reg_bit *p_bit)
777 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
781 #define ATTN_STATE_BITS (0xfff)
782 #define ATTN_BITS_MASKABLE (0x3ff)
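/* A note on the two masks above, inferred from their use below: the attention
 * status block carries 12 attention state bits (0xfff), of which only the
 * lower 10 (0x3ff) are masked/unmasked in IGU_REG_ATTENTION_ENABLE during
 * assertion/deassertion; bit 0x100 is the MFW/MCP indication handled via
 * ecore_mcp_handle_events().
 */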
783 struct ecore_sb_attn_info {
784 /* Virtual & Physical address of the SB */
785 struct atten_status_block *sb_attn;
788 /* Last seen running index */
791 /* A mask of the AEU bits resulting in a parity error */
792 u32 parity_mask[NUM_ATTN_REGS];
794 /* A pointer to the attention description structure */
795 struct aeu_invert_reg *p_aeu_desc;
797 /* Previously asserted attentions, which are still unasserted */
800 /* Cleanup address for the link's general hw attention */
804 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
805 struct ecore_sb_attn_info *p_sb_desc)
809 OSAL_MMIOWB(p_hwfn->p_dev);
811 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
812 if (p_sb_desc->index != index) {
813 p_sb_desc->index = index;
814 rc = ECORE_SB_ATT_IDX;
817 OSAL_MMIOWB(p_hwfn->p_dev);
823 * @brief ecore_int_assertion - handles asserted attention bits
826 * @param asserted_bits newly asserted bits
827 * @return enum _ecore_status_t
829 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
832 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
835 /* Mask the source of the attention in the IGU */
836 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
837 IGU_REG_ATTENTION_ENABLE);
838 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
839 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
840 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
841 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
843 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
844 "inner known ATTN state: 0x%04x --> 0x%04x\n",
845 sb_attn_sw->known_attn,
846 sb_attn_sw->known_attn | asserted_bits);
847 sb_attn_sw->known_attn |= asserted_bits;
849 /* Handle MCP events */
850 if (asserted_bits & 0x100) {
851 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
852 /* Clear the MCP attention */
853 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
854 sb_attn_sw->mfw_attn_addr, 0);
857 /* FIXME - this will change once we have GOOD gtt definitions */
858 DIRECT_REG_WR(p_hwfn,
859 (u8 OSAL_IOMEM*)p_hwfn->regview +
860 GTT_BAR0_MAP_REG_IGU_CMD +
861 ((IGU_CMD_ATTN_BIT_SET_UPPER -
862 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
864 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
867 return ECORE_SUCCESS;
870 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
871 enum block_id id, enum dbg_attn_type type,
874 struct dbg_attn_block_result attn_results;
875 enum dbg_status status;
877 OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));
879 status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
880 b_clear, &attn_results);
882 if (status != DBG_STATUS_OK)
883 DP_NOTICE(p_hwfn, true,
884 "Failed to parse attention information [status: %s]\n",
885 ecore_dbg_get_status_str(status));
887 ecore_dbg_parse_attn(p_hwfn, &attn_results);
889 if (status != DBG_STATUS_OK)
890 DP_NOTICE(p_hwfn, true,
891 "Failed to parse attention information [status: %d]\n",
894 ecore_dbg_print_attn(p_hwfn, &attn_results);
899 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
900 * cause of the attention
903 * @param p_aeu - descriptor of an AEU bit which caused the attention
904 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
905 * this bit to this group.
906 * @param bit_index - index of this bit in the aeu_en_reg
908 * @return enum _ecore_status_t
910 static enum _ecore_status_t
911 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
912 struct aeu_invert_reg_bit *p_aeu,
914 const char *p_bit_name,
917 enum _ecore_status_t rc = ECORE_INVAL;
918 bool b_fatal = false;
920 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
921 p_bit_name, bitmask);
923 /* Call callback before clearing the interrupt status */
925 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
927 rc = p_aeu->cb(p_hwfn);
930 if (rc != ECORE_SUCCESS)
933 /* Print HW block interrupt registers */
934 if (p_aeu->block_index != MAX_BLOCK_ID)
935 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
936 ATTN_TYPE_INTERRUPT, !b_fatal);
938 /* Reach assertion if attention is fatal */
940 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
943 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
946 /* Prevent this Attention from being asserted in the future */
947 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
948 p_hwfn->p_dev->attn_clr_en) {
951 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
952 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
953 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
961 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
964 * @param p_aeu - descriptor of an AEU bit which caused the parity
965 * @param aeu_en_reg - address of the AEU enable register
968 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
969 struct aeu_invert_reg_bit *p_aeu,
970 u32 aeu_en_reg, u8 bit_index)
972 u32 block_id = p_aeu->block_index, mask, val;
974 DP_NOTICE(p_hwfn->p_dev, false,
975 "%s parity attention is set [address 0x%08x, bit %d]\n",
976 p_aeu->bit_name, aeu_en_reg, bit_index);
978 if (block_id != MAX_BLOCK_ID) {
979 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
981 /* In A0, there's a single parity bit for several blocks */
982 if (block_id == BLOCK_BTB) {
983 ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
984 ATTN_TYPE_PARITY, false);
985 ecore_int_attn_print(p_hwfn, BLOCK_MCP,
986 ATTN_TYPE_PARITY, false);
990 /* Prevent this parity error from being re-asserted */
991 mask = ~(0x1 << bit_index);
992 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
993 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
994 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
999 * @brief - handles deassertion of previously asserted attentions.
1002 * @param deasserted_bits - newly deasserted bits
1003 * @return enum _ecore_status_t
1006 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
1007 u16 deasserted_bits)
1009 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1010 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1011 u8 i, j, k, bit_idx;
1012 enum _ecore_status_t rc = ECORE_SUCCESS;
1014 /* Read the attention registers in the AEU */
1015 for (i = 0; i < NUM_ATTN_REGS; i++) {
1016 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1017 MISC_REG_AEU_AFTER_INVERT_1_IGU +
1019 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1020 "Deasserted bits [%d]: %08x\n",
1024 /* Handle parity attentions first */
1025 for (i = 0; i < NUM_ATTN_REGS; i++)
1027 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1030 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1031 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1032 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1034 /* Skip register in which no parity bit is currently set */
1038 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1039 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1041 if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
1042 !!(parities & (1 << bit_idx)))
1043 ecore_int_deassertion_parity(p_hwfn, p_bit,
1046 bit_idx += ATTENTION_LENGTH(p_bit->flags);
1050 /* Find non-parity cause for attention and act */
1051 for (k = 0; k < MAX_ATTN_GRPS; k++) {
1052 struct aeu_invert_reg_bit *p_aeu;
1054 /* Handle only groups whose attention is currently deasserted */
1055 if (!(deasserted_bits & (1 << k)))
1058 for (i = 0; i < NUM_ATTN_REGS; i++) {
1061 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1063 k * sizeof(u32) * NUM_ATTN_REGS;
1064 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1065 bits = aeu_inv_arr[i] & en;
1067 /* Skip if no bit from this group is currently set */
1071 /* Find all set bits from current register which belong
1072 * to current group, making them responsible for the
1073 * previous assertion.
1075 for (j = 0, bit_idx = 0; bit_idx < 32; j++)
1077 long unsigned int bitmask;
1080 /* Need to account for bits with changed meaning */
1081 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1082 p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);
1085 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1086 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
1092 /* Find the bits relating to HW-block, then
1093 * shift so they'll become LSB.
1095 bitmask = bits & (((1 << bit_len) - 1) << bit);
1099 u32 flags = p_aeu->flags;
1103 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
1106 /* Some bits represent more than
1107 * a single interrupt. Correctly print
1110 if (ATTENTION_LENGTH(flags) > 2 ||
1111 ((flags & ATTENTION_PAR_INT) &&
1112 ATTENTION_LENGTH(flags) > 1))
1113 OSAL_SNPRINTF(bit_name, 30,
1117 OSAL_STRNCPY(bit_name,
1121 /* We now need to pass bitmask in its
1126 /* Handle source of the attention */
1127 ecore_int_deassertion_aeu_bit(p_hwfn,
1134 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1139 /* Clear IGU indication for the deasserted bits */
1140 /* FIXME - this will change once we have GOOD gtt definitions */
1141 DIRECT_REG_WR(p_hwfn,
1142 (u8 OSAL_IOMEM*)p_hwfn->regview +
1143 GTT_BAR0_MAP_REG_IGU_CMD +
1144 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1145 IGU_CMD_INT_ACK_BASE) << 3),
1146 ~((u32)deasserted_bits));
1148 /* Unmask deasserted attentions in IGU */
1149 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1150 IGU_REG_ATTENTION_ENABLE);
1151 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1152 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1154 /* Clear deassertion from inner state */
1155 sb_attn_sw->known_attn &= ~deasserted_bits;
1160 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1162 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1163 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1164 u16 index = 0, asserted_bits, deasserted_bits;
1165 u32 attn_bits = 0, attn_acks = 0;
1166 enum _ecore_status_t rc = ECORE_SUCCESS;
1168 /* Read current attention bits/acks - safeguard against attentions
1169 * by guaranteeing work on a synchronized timeframe
1172 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1173 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1174 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1175 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1176 p_sb_attn->sb_index = index;
1178 /* Attention / Deassertion are meaningful (and in correct state)
1179 * only when they differ and are consistent with the known state - deassertion
1180 * when previous attention & current ack, and assertion when current
1181 * attention with no previous attention
1183 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1184 ~p_sb_attn_sw->known_attn;
1185 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1186 p_sb_attn_sw->known_attn;
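	/* Illustrative example of the two expressions above: if attention line
	 * 3 is set in attn_bits, not yet acked in attn_acks and was not
	 * previously known, it shows up in asserted_bits; once the hardware
	 * later clears the bit in attn_bits while the ack is still set and the
	 * line is still in known_attn, it shows up in deasserted_bits instead.
	 */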
1188 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1190 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1191 index, attn_bits, attn_acks, asserted_bits,
1192 deasserted_bits, p_sb_attn_sw->known_attn);
1193 else if (asserted_bits == 0x100)
1195 "MFW indication via attention\n");
1197 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1198 "MFW indication [deassertion]\n");
1200 if (asserted_bits) {
1201 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1206 if (deasserted_bits)
1207 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1212 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1213 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1215 struct igu_prod_cons_update igu_ack = { 0 };
1217 igu_ack.sb_id_and_flags =
1218 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1219 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1220 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1221 (IGU_SEG_ACCESS_ATTN <<
1222 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1224 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1226 /* Both segments (interrupts & acks) are written to the same address;
1227 * Need to guarantee all commands will be received (in-order) by HW.
1229 OSAL_MMIOWB(p_hwfn->p_dev);
1230 OSAL_BARRIER(p_hwfn->p_dev);
1233 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1235 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1236 struct ecore_pi_info *pi_info = OSAL_NULL;
1237 struct ecore_sb_attn_info *sb_attn;
1238 struct ecore_sb_info *sb_info;
1245 if (!p_hwfn->p_sp_sb) {
1246 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1250 sb_info = &p_hwfn->p_sp_sb->sb_info;
1251 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1253 DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
1257 if (!p_hwfn->p_sb_attn) {
1258 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1261 sb_attn = p_hwfn->p_sb_attn;
1263 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1264 p_hwfn, p_hwfn->my_id);
1266 /* Disable ack for def status block. Required for both MSI-X and
1267 * INTA in non-mask mode; in INTA it does no harm.
1269 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1271 /* Gather Interrupts/Attentions information */
1272 if (!sb_info->sb_virt) {
1273 DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1275 u32 tmp_index = sb_info->sb_ack;
1276 rc = ecore_sb_update_sb_idx(sb_info);
1277 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1278 "Interrupt indices: 0x%08x --> 0x%08x\n",
1279 tmp_index, sb_info->sb_ack);
1282 if (!sb_attn || !sb_attn->sb_attn) {
1283 DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
1285 u16 tmp_index = sb_attn->index;
1287 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1288 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1289 "Attention indices: 0x%08x --> 0x%08x\n",
1290 tmp_index, sb_attn->index);
1293 /* Check if we expect interrupts at this time. If not, just ack them */
1294 if (!(rc & ECORE_SB_EVENT_MASK)) {
1295 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1299 /* Check the validity of the DPC ptt. If invalid, ack interrupts and bail */
1300 if (!p_hwfn->p_dpc_ptt) {
1301 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1302 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1306 if (rc & ECORE_SB_ATT_IDX)
1307 ecore_int_attentions(p_hwfn);
1309 if (rc & ECORE_SB_IDX) {
1312 /* Since we only looked at the SB index, it's possible more
1313 * than a single protocol-index on the SB incremented.
1314 * Iterate over all configured protocol indices and check
1315 * whether something happened for each.
1317 for (pi = 0; pi < arr_size; pi++) {
1318 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1319 if (pi_info->comp_cb != OSAL_NULL)
1320 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1324 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1325 /* This should be done before the interrupts are enabled,
1326 * since otherwise a new attention will be generated.
1328 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1331 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1334 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1336 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1341 if (p_sb->sb_attn) {
1342 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1344 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1347 OSAL_FREE(p_hwfn->p_dev, p_sb);
1348 p_hwfn->p_sb_attn = OSAL_NULL;
1351 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1352 struct ecore_ptt *p_ptt)
1354 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1356 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1359 sb_info->known_attn = 0;
1361 /* Configure Attention Status Block in IGU */
1362 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1363 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1364 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1365 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1368 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1369 struct ecore_ptt *p_ptt,
1371 dma_addr_t sb_phy_addr)
1373 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1376 sb_info->sb_attn = sb_virt_addr;
1377 sb_info->sb_phys = sb_phy_addr;
1379 /* Set the pointer to the AEU descriptors */
1380 sb_info->p_aeu_desc = aeu_descs;
1382 /* Calculate Parity Masks */
1383 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1384 for (i = 0; i < NUM_ATTN_REGS; i++) {
1385 /* j is array index, k is bit index */
1386 for (j = 0, k = 0; k < 32; j++) {
1387 struct aeu_invert_reg_bit *p_aeu;
1389 p_aeu = &aeu_descs[i].bits[j];
1390 if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1391 sb_info->parity_mask[i] |= 1 << k;
1393 k += ATTENTION_LENGTH(p_aeu->flags);
1395 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1396 "Attn Mask [Reg %d]: 0x%08x\n",
1397 i, sb_info->parity_mask[i]);
1400 /* Set the address of cleanup for the mcp attention */
1401 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1402 MISC_REG_AEU_GENERAL_ATTN_0;
1404 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
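	/* Example of the parity-mask construction above: an aeu_descs entry
	 * flagged ATTENTION_PAR_INT spans two AEU bits starting at bit k, the
	 * first of which is treated as the parity indication, so bit k is set
	 * in parity_mask[i] and k then advances by ATTENTION_LENGTH(flags) == 2
	 * to the next source.
	 */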
1407 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1408 struct ecore_ptt *p_ptt)
1410 struct ecore_dev *p_dev = p_hwfn->p_dev;
1411 struct ecore_sb_attn_info *p_sb;
1412 dma_addr_t p_phys = 0;
1416 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1418 DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
1423 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1424 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1426 DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
1427 OSAL_FREE(p_dev, p_sb);
1431 /* Attention setup */
1432 p_hwfn->p_sb_attn = p_sb;
1433 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1435 return ECORE_SUCCESS;
1438 /* coalescing timeout = timeset << (timer_res + 1) */
1439 #define ECORE_CAU_DEF_RX_USECS 24
1440 #define ECORE_CAU_DEF_TX_USECS 48
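/* A worked example of the resolution logic used below (assuming the elided
 * branches pick timer_res = 0/1/2 for the three ranges): the default RX value
 * of 24 usec is <= 0x7F, so timer_res stays 0 and the timeset written by the
 * callers is 24 >> 0 == 24, comfortably inside the 7-bit timeset field; only
 * coalescing values above 0xFF need the coarser resolutions.
 */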
1442 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1443 struct cau_sb_entry *p_sb_entry,
1444 u8 pf_id, u16 vf_number, u8 vf_valid)
1446 struct ecore_dev *p_dev = p_hwfn->p_dev;
1450 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1452 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1453 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1454 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1455 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1456 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1458 cau_state = CAU_HC_DISABLE_STATE;
1460 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1461 cau_state = CAU_HC_ENABLE_STATE;
1462 if (!p_dev->rx_coalesce_usecs)
1463 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1464 if (!p_dev->tx_coalesce_usecs)
1465 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1468 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1469 if (p_dev->rx_coalesce_usecs <= 0x7F)
1471 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1475 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1477 if (p_dev->tx_coalesce_usecs <= 0x7F)
1479 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1483 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1485 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1486 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1489 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1490 struct ecore_ptt *p_ptt,
1491 u16 igu_sb_id, u32 pi_index,
1492 enum ecore_coalescing_fsm coalescing_fsm,
1495 struct cau_pi_entry pi_entry;
1496 u32 sb_offset, pi_offset;
1498 if (IS_VF(p_hwfn->p_dev))
1499 return;/* @@@TBD MichalK- VF CAU... */
1501 sb_offset = igu_sb_id * PIS_PER_SB_E4;
1502 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1504 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1505 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1506 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1508 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1510 pi_offset = sb_offset + pi_index;
1511 if (p_hwfn->hw_init_done) {
1512 ecore_wr(p_hwfn, p_ptt,
1513 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1514 *((u32 *)&(pi_entry)));
1516 STORE_RT_REG(p_hwfn,
1517 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1518 *((u32 *)&(pi_entry)));
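		/* Note: before hw_init_done is set the PI entry cannot be
		 * written to the CAU directly, so it is staged in the runtime
		 * init array via STORE_RT_REG and presumably applied when the
		 * runtime init sequence runs; after init it is written straight
		 * to CAU_REG_PI_MEMORY.
		 */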
1522 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1523 struct ecore_ptt *p_ptt,
1524 struct ecore_sb_info *p_sb, u32 pi_index,
1525 enum ecore_coalescing_fsm coalescing_fsm,
1528 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1529 pi_index, coalescing_fsm, timeset);
1532 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1533 struct ecore_ptt *p_ptt,
1534 dma_addr_t sb_phys, u16 igu_sb_id,
1535 u16 vf_number, u8 vf_valid)
1537 struct cau_sb_entry sb_entry;
1539 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1540 vf_number, vf_valid);
1542 if (p_hwfn->hw_init_done) {
1543 /* Wide-bus, initialize via DMAE */
1544 u64 phys_addr = (u64)sb_phys;
1546 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
1547 CAU_REG_SB_ADDR_MEMORY +
1548 igu_sb_id * sizeof(u64), 2,
1549 OSAL_NULL /* default parameters */);
1550 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
1551 CAU_REG_SB_VAR_MEMORY +
1552 igu_sb_id * sizeof(u64), 2,
1553 OSAL_NULL /* default parameters */);
1555 /* Initialize Status Block Address */
1556 STORE_RT_REG_AGG(p_hwfn,
1557 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2,
1560 STORE_RT_REG_AGG(p_hwfn,
1561 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2,
1565 /* Configure pi coalescing if set */
1566 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1567 /* eth will open queues for all tcs, so configure all of them
1568 * properly, rather than just the active ones
1570 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1572 u8 timeset, timer_res;
1575 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1576 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1578 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1582 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1583 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1584 ECORE_COAL_RX_STATE_MACHINE,
1587 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1589 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1593 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1594 for (i = 0; i < num_tc; i++) {
1595 _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1596 igu_sb_id, TX_PI(i),
1597 ECORE_COAL_TX_STATE_MACHINE,
1603 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1604 struct ecore_ptt *p_ptt,
1605 struct ecore_sb_info *sb_info)
1607 /* zero status block and ack counter */
1608 sb_info->sb_ack = 0;
1609 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1611 if (IS_PF(p_hwfn->p_dev))
1612 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1613 sb_info->igu_sb_id, 0, 0);
1616 struct ecore_igu_block *
1617 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1619 struct ecore_igu_block *p_block;
1622 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1624 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1626 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1627 !(p_block->status & ECORE_IGU_STATUS_FREE))
1630 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1638 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1641 struct ecore_igu_block *p_block;
1644 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1646 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1648 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1650 p_block->vector_number != vector_id)
1656 return ECORE_SB_INVALID_IDX;
1659 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1663 /* Assuming a continuous set of IGU SBs dedicated to a given PF */
1664 if (sb_id == ECORE_SP_SB_ID)
1665 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1666 else if (IS_PF(p_hwfn->p_dev))
1667 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1669 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1671 if (igu_sb_id == ECORE_SB_INVALID_IDX)
1672 DP_NOTICE(p_hwfn, true,
1673 "Slowpath SB vector %04x doesn't exist\n",
1675 else if (sb_id == ECORE_SP_SB_ID)
1676 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1677 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1679 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1680 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1685 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1686 struct ecore_ptt *p_ptt,
1687 struct ecore_sb_info *sb_info,
1689 dma_addr_t sb_phy_addr,
1692 sb_info->sb_virt = sb_virt_addr;
1693 sb_info->sb_phys = sb_phy_addr;
1695 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1697 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1700 /* Let the igu info reference the client's SB info */
1701 if (sb_id != ECORE_SP_SB_ID) {
1702 if (IS_PF(p_hwfn->p_dev)) {
1703 struct ecore_igu_info *p_info;
1704 struct ecore_igu_block *p_block;
1706 p_info = p_hwfn->hw_info.p_igu_info;
1707 p_block = &p_info->entry[sb_info->igu_sb_id];
1709 p_block->sb_info = sb_info;
1710 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1711 p_info->usage.free_cnt--;
1713 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1717 #ifdef ECORE_CONFIG_DIRECT_HWFN
1718 sb_info->p_hwfn = p_hwfn;
1720 sb_info->p_dev = p_hwfn->p_dev;
1722 /* The igu address will hold the absolute address that needs to be
1723 * written to for a specific status block
1725 if (IS_PF(p_hwfn->p_dev)) {
1726 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview +
1727 GTT_BAR0_MAP_REG_IGU_CMD +
1728 (sb_info->igu_sb_id << 3);
1732 (u8 OSAL_IOMEM*)p_hwfn->regview +
1733 PXP_VF_BAR0_START_IGU +
1734 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1737 sb_info->flags |= ECORE_SB_INFO_INIT;
1739 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1741 return ECORE_SUCCESS;
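/* A minimal usage sketch (illustrative only -- the 'p_hwfn'/'p_ptt' handles
 * and the 'my_sb'/'my_virt'/'my_phys'/'sb_id' names are hypothetical, not
 * part of this API):
 *
 *	struct ecore_sb_info my_sb;
 *	dma_addr_t my_phys;
 *	void *my_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &my_phys,
 *						SB_ALIGNED_SIZE(p_hwfn));
 *	if (my_virt != OSAL_NULL &&
 *	    ecore_int_sb_init(p_hwfn, p_ptt, &my_sb, my_virt, my_phys,
 *			      sb_id) == ECORE_SUCCESS) {
 *		... use the status block ...
 *		ecore_int_sb_release(p_hwfn, &my_sb, sb_id);
 *	}
 */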
1744 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1745 struct ecore_sb_info *sb_info,
1748 struct ecore_igu_info *p_info;
1749 struct ecore_igu_block *p_block;
1751 if (sb_info == OSAL_NULL)
1752 return ECORE_SUCCESS;
1754 /* zero status block and ack counter */
1755 sb_info->sb_ack = 0;
1756 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1758 if (IS_VF(p_hwfn->p_dev)) {
1759 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1760 return ECORE_SUCCESS;
1763 p_info = p_hwfn->hw_info.p_igu_info;
1764 p_block = &p_info->entry[sb_info->igu_sb_id];
1766 /* Vector 0 is reserved for the Default SB */
1767 if (p_block->vector_number == 0) {
1768 DP_ERR(p_hwfn, "Do not free the SP SB using this function");
1772 /* Lose reference to client's SB info, and fix counters */
1773 p_block->sb_info = OSAL_NULL;
1774 p_block->status |= ECORE_IGU_STATUS_FREE;
1775 p_info->usage.free_cnt++;
1777 return ECORE_SUCCESS;
1780 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1782 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1787 if (p_sb->sb_info.sb_virt) {
1788 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1789 p_sb->sb_info.sb_virt,
1790 p_sb->sb_info.sb_phys,
1791 SB_ALIGNED_SIZE(p_hwfn));
1794 OSAL_FREE(p_hwfn->p_dev, p_sb);
1795 p_hwfn->p_sp_sb = OSAL_NULL;
1798 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1799 struct ecore_ptt *p_ptt)
1801 struct ecore_sb_sp_info *p_sb;
1802 dma_addr_t p_phys = 0;
1806 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1808 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_sp_info'\n");
1813 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1815 SB_ALIGNED_SIZE(p_hwfn));
1817 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
1818 OSAL_FREE(p_hwfn->p_dev, p_sb);
1823 /* Status Block setup */
1824 p_hwfn->p_sp_sb = p_sb;
1825 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1826 p_virt, p_phys, ECORE_SP_SB_ID);
1828 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1830 return ECORE_SUCCESS;
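/**
 * @brief ecore_int_register_cb - Registers a completion callback on the
 *        slowpath SB. The callback is bound to the first free protocol
 *        index [PI], and the caller is handed a pointer to that PI's entry
 *        in the SB's pi_array so it can track the firmware consumer.
 *
 * Illustrative usage by a hypothetical slowpath client [a sketch only;
 * my_proto_cb and the out-parameter names are assumptions - see the
 * prototype in ecore_int.h for the exact signature]:
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_proto_cb, p_my_cookie,
 *				   &my_pi, &p_my_fw_cons);
 *	...
 *	ecore_int_unregister_cb(p_hwfn, my_pi);
 */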
1833 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1834 ecore_int_comp_cb_t comp_cb,
1839 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1840 enum _ecore_status_t rc = ECORE_NOMEM;
1843 /* Look for a free index */
1844 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1845 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1848 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1849 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1851 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1859 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
1862 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1864 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1867 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1868 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1870 return ECORE_SUCCESS;
1873 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1875 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1878 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1879 struct ecore_ptt *p_ptt,
1880 enum ecore_int_mode int_mode)
1882 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1885 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1886 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1887 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1891 p_hwfn->p_dev->int_mode = int_mode;
1892 switch (p_hwfn->p_dev->int_mode) {
1893 case ECORE_INT_MODE_INTA:
1894 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1895 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1898 case ECORE_INT_MODE_MSI:
1899 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1900 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1903 case ECORE_INT_MODE_MSIX:
1904 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1906 case ECORE_INT_MODE_POLL:
1910 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
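/**
 * @brief ecore_int_igu_enable_attn - Enables attention generation [skipped
 *        on FPGA]: clears and re-arms IGU_REG_ATTENTION_ENABLE around the
 *        leading/trailing edge latch configuration, flushes the writes to
 *        the IGU, and unmasks the AEU signals toward the IGU.
 */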
1913 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1914 struct ecore_ptt *p_ptt)
1917 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1918 DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
1923 /* Configure AEU signal change to produce attentions */
1924 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1925 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1926 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1927 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1929 /* Flush the writes to IGU */
1930 OSAL_MMIOWB(p_hwfn->p_dev);
1932 /* Unmask AEU signals toward IGU */
1933 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1936 enum _ecore_status_t
1937 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1938 enum ecore_int_mode int_mode)
1940 enum _ecore_status_t rc = ECORE_SUCCESS;
1942 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1944 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1945 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1946 if (rc != ECORE_SUCCESS) {
1947 DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
1948 return ECORE_NORESOURCES;
1950 p_hwfn->b_int_requested = true;
1953 /* Enable interrupt Generation */
1954 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1956 p_hwfn->b_int_enabled = 1;
1961 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1962 struct ecore_ptt *p_ptt)
1964 p_hwfn->b_int_enabled = 0;
1966 if (IS_VF(p_hwfn->p_dev))
1969 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1972 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
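/**
 * @brief ecore_int_igu_cleanup_sb - Sets or clears the cleanup bit of a
 *        single IGU SB through the IGU command registers [a data write
 *        followed by a control write on behalf of opaque_fid], then polls
 *        the corresponding IGU_REG_CLEANUP_STATUS bit for up to
 *        IGU_CLEANUP_SLEEP_LENGTH iterations.
 */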
1973 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1974 struct ecore_ptt *p_ptt,
1979 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1980 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1981 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1982 u8 type = 0; /* FIXME MichalS type??? */
1984 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1985 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1987 /* Use the Control Command Register to perform the cleanup. There is an
1988 * option to do this using the IGU BAR, but then it can't be used for VFs.
1991 /* Set the data field */
1992 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1993 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1994 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1996 /* Set the control register */
1997 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1998 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1999 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
2001 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
2003 OSAL_BARRIER(p_hwfn->p_dev);
2005 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
2007 /* Flush the write to IGU */
2008 OSAL_MMIOWB(p_hwfn->p_dev);
2010 /* calculate where to read the status bit from */
2011 sb_bit = 1 << (igu_sb_id % 32);
2012 sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
2014 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
2016 /* Now wait for the command to complete */
2017 while (--sleep_cnt) {
2018 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
2019 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
2025 DP_NOTICE(p_hwfn, true,
2026 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
2030 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
2031 struct ecore_ptt *p_ptt,
2032 u16 igu_sb_id, u16 opaque, bool b_set)
2034 struct ecore_igu_block *p_block;
2037 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2038 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2039 "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%x\n",
2040 igu_sb_id, p_block->function_id, p_block->is_pf,
2041 p_block->vector_number);
2045 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
2048 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
2050 /* Wait for the IGU SB cleanup to complete */
2051 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
2054 val = ecore_rd(p_hwfn, p_ptt,
2055 IGU_REG_WRITE_DONE_PENDING +
2056 ((igu_sb_id / 32) * 4));
2057 if (val & (1 << (igu_sb_id % 32)))
2062 if (i == IGU_CLEANUP_SLEEP_LENGTH)
2063 DP_NOTICE(p_hwfn, true,
2064 "Cleanup failed - SB[0x%08x] still appears in WRITE_DONE_PENDING\n",
2067 /* Clear the CAU for the SB */
2068 for (pi = 0; pi < 12; pi++)
2069 ecore_wr(p_hwfn, p_ptt,
2070 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
2073 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
2074 struct ecore_ptt *p_ptt,
2078 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2079 struct ecore_igu_block *p_block;
2083 /* @@@TBD MichalK temporary... should be moved to init-tool... */
2084 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2085 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2086 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2087 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2091 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2093 p_block = &p_info->entry[igu_sb_id];
2095 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2097 (p_block->status & ECORE_IGU_STATUS_DSB))
2100 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2101 p_hwfn->hw_info.opaque_fid,
2106 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2108 p_hwfn->hw_info.opaque_fid,
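/**
 * @brief ecore_int_igu_reset_cam - Re-distributes the IGU CAM between the
 *        PF and its VFs [using the MFW-provided SB count when available],
 *        marks all SBs free, and rewrites every mapping line: the default
 *        SB on vector 0 of the PF, then the PF SBs, then the VF lines
 *        [left invalid until each VF is initialized].
 */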
2112 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
2113 struct ecore_ptt *p_ptt)
2115 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2116 struct ecore_igu_block *p_block;
2121 if (!RESC_NUM(p_hwfn, ECORE_SB)) {
2122 /* We're using an old MFW - we have to prevent any switching
2123 * of SBs between the PF and VFs, as the driver would later be
2124 * unable to tell which belongs to which.
2126 p_info->b_allow_pf_vf_change = false;
2128 /* Use the numbers the MFW has provided -
2129 * don't forget the MFW accounts for the default SB as well.
2131 p_info->b_allow_pf_vf_change = true;
2133 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
2135 "MFW reports 0x%04x PF SBs; IGU indicates only 0x%04x\n",
2136 RESC_NUM(p_hwfn, ECORE_SB) - 1,
2138 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
2141 /* TODO - how do we learn about VF SBs from MFW? */
2142 if (IS_PF_SRIOV(p_hwfn)) {
2143 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2145 if (vfs != p_info->usage.iov_cnt)
2146 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2147 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2148 p_info->usage.iov_cnt, vfs);
2150 /* At this point we know the total number of SBs in the
2151 * IGU and the number of PF SBs, so we can validate that
2152 * enough would remain for the VFs.
2154 if (vfs > p_info->usage.free_cnt +
2155 p_info->usage.free_cnt_iov -
2156 p_info->usage.cnt) {
2157 DP_NOTICE(p_hwfn, true,
2158 "Not enough SBs for VFs - 0x%04x free SBs, from which the PF requires %04x and the VFs require %04x\n",
2159 p_info->usage.free_cnt +
2160 p_info->usage.free_cnt_iov,
2161 p_info->usage.cnt, vfs);
2167 /* Cap the number of VF SBs by the number of VFs */
2168 if (IS_PF_SRIOV(p_hwfn))
2169 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
2171 /* Mark all SBs as free, now in the right PF/VF division */
2172 p_info->usage.free_cnt = p_info->usage.cnt;
2173 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2174 p_info->usage.orig = p_info->usage.cnt;
2175 p_info->usage.iov_orig = p_info->usage.iov_cnt;
2177 /* We now proceed to re-configure the IGU CAM to reflect the initial
2178 * configuration. We can start with the default SB.
2180 pf_sbs = p_info->usage.cnt;
2181 vf_sbs = p_info->usage.iov_cnt;
2183 for (igu_sb_id = p_info->igu_dsb_id;
2184 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2186 p_block = &p_info->entry[igu_sb_id];
2189 if (!(p_block->status & ECORE_IGU_STATUS_VALID))
2192 if (p_block->status & ECORE_IGU_STATUS_DSB) {
2193 p_block->function_id = p_hwfn->rel_pf_id;
2195 p_block->vector_number = 0;
2196 p_block->status = ECORE_IGU_STATUS_VALID |
2197 ECORE_IGU_STATUS_PF |
2198 ECORE_IGU_STATUS_DSB;
2199 } else if (pf_sbs) {
2201 p_block->function_id = p_hwfn->rel_pf_id;
2203 p_block->vector_number = p_info->usage.cnt - pf_sbs;
2204 p_block->status = ECORE_IGU_STATUS_VALID |
2205 ECORE_IGU_STATUS_PF |
2206 ECORE_IGU_STATUS_FREE;
2207 } else if (vf_sbs) {
2208 p_block->function_id =
2209 p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
2210 p_info->usage.iov_cnt - vf_sbs;
2212 p_block->vector_number = 0;
2213 p_block->status = ECORE_IGU_STATUS_VALID |
2214 ECORE_IGU_STATUS_FREE;
2217 p_block->function_id = 0;
2219 p_block->vector_number = 0;
2222 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2223 p_block->function_id);
2224 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2225 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2226 p_block->vector_number);
2228 /* VF entries will be enabled when the VF is initialized */
2229 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2231 rval = ecore_rd(p_hwfn, p_ptt,
2232 IGU_REG_MAPPING_MEMORY +
2233 sizeof(u32) * igu_sb_id);
2236 ecore_wr(p_hwfn, p_ptt,
2237 IGU_REG_MAPPING_MEMORY +
2238 sizeof(u32) * igu_sb_id,
2241 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2242 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2243 igu_sb_id, p_block->function_id,
2244 p_block->is_pf, p_block->vector_number,
2252 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2253 struct ecore_ptt *p_ptt)
2255 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2257 /* Return all the usage indications to their defaults prior to the reset;
2258 * the reset expects the non-orig counters to reflect the initial status of
2259 * the SBs, and will re-calculate the originals based on those.
2261 p_cnt->cnt = p_cnt->orig;
2262 p_cnt->free_cnt = p_cnt->orig;
2263 p_cnt->iov_cnt = p_cnt->iov_orig;
2264 p_cnt->free_cnt_iov = p_cnt->iov_orig;
2266 p_cnt->iov_orig = 0;
2268 /* TODO - we probably need to re-configure the CAU as well... */
2269 return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2272 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2273 struct ecore_ptt *p_ptt,
2276 u32 val = ecore_rd(p_hwfn, p_ptt,
2277 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2278 struct ecore_igu_block *p_block;
2280 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2282 /* Fill the block information */
2283 p_block->function_id = GET_FIELD(val,
2284 IGU_MAPPING_LINE_FUNCTION_NUMBER);
2285 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2286 p_block->vector_number = GET_FIELD(val,
2287 IGU_MAPPING_LINE_VECTOR_NUMBER);
2288 p_block->igu_sb_id = igu_sb_id;
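/**
 * @brief ecore_int_igu_read_cam - Allocates the IGU info and scans the IGU
 *        CAM: each line is classified as belonging to this PF or to one of
 *        its VFs, the first such line becomes the default SB, and the free
 *        PF/VF SB counters are derived from the scan.
 */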
2291 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
2292 struct ecore_ptt *p_ptt)
2294 struct ecore_igu_info *p_igu_info;
2295 struct ecore_igu_block *p_block;
2296 u32 min_vf = 0, max_vf = 0;
2299 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
2301 sizeof(*p_igu_info));
2302 if (!p_hwfn->hw_info.p_igu_info)
2304 p_igu_info = p_hwfn->hw_info.p_igu_info;
2306 /* Distinguish between an existent and a non-existent default SB */
2307 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
2309 /* Find the range of VF IDs whose SBs belong to this PF */
2310 if (p_hwfn->p_dev->p_iov_info) {
2311 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
2313 min_vf = p_iov->first_vf_in_pf;
2314 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2318 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2320 /* Read the current entry; note it might not belong to this PF */
2321 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2322 p_block = &p_igu_info->entry[igu_sb_id];
2324 if ((p_block->is_pf) &&
2325 (p_block->function_id == p_hwfn->rel_pf_id)) {
2326 p_block->status = ECORE_IGU_STATUS_PF |
2327 ECORE_IGU_STATUS_VALID |
2328 ECORE_IGU_STATUS_FREE;
2330 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2331 p_igu_info->usage.cnt++;
2332 } else if (!(p_block->is_pf) &&
2333 (p_block->function_id >= min_vf) &&
2334 (p_block->function_id < max_vf)) {
2335 /* Available for VFs of this PF */
2336 p_block->status = ECORE_IGU_STATUS_VALID |
2337 ECORE_IGU_STATUS_FREE;
2339 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2340 p_igu_info->usage.iov_cnt++;
2343 /* Mark the first entry belonging to the PF or its VFs
2344 * as the default SB [we'll reset the IGU prior to first usage].
2346 if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
2347 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
2348 p_igu_info->igu_dsb_id = igu_sb_id;
2349 p_block->status |= ECORE_IGU_STATUS_DSB;
2352 /* While this isn't suitable for all clients, limit the number
2353 * of prints by having each PF print only its own entries, with the
2354 * exception of PF0, which prints everything.
2356 if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
2357 (p_hwfn->abs_pf_id == 0))
2358 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2359 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2360 igu_sb_id, p_block->function_id,
2361 p_block->is_pf, p_block->vector_number);
2364 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
2365 DP_NOTICE(p_hwfn, true,
2366 "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2367 p_igu_info->igu_dsb_id);
2371 /* All non-default SBs are considered free at this point */
2372 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2373 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2375 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2376 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2377 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
2378 p_igu_info->usage.iov_cnt);
2380 return ECORE_SUCCESS;
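/**
 * @brief ecore_int_igu_relocate_sb - Moves a single IGU CAM line between
 *        the PF and its VF pool. With b_to_vf, the free line currently
 *        mapped to PF vector sb_id + 1 is handed to the VF pool; otherwise
 *        a free VF line is claimed and mapped as PF vector sb_id + 1. The
 *        IGU mapping line and the CAU are updated accordingly.
 *
 * Illustrative call [a sketch only - sb_id is the client's relative SB]:
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, sb_id, true);
 */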
2383 enum _ecore_status_t
2384 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2385 u16 sb_id, bool b_to_vf)
2387 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2388 struct ecore_igu_block *p_block = OSAL_NULL;
2389 u16 igu_sb_id = 0, vf_num = 0;
2392 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
2395 if (sb_id == ECORE_SP_SB_ID)
2398 if (!p_info->b_allow_pf_vf_change) {
2399 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
2403 /* If we're moving an SB from the PF to a VF, the client had to specify
2404 * which vector it wants to move.
2407 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
2408 if (igu_sb_id == ECORE_SB_INVALID_IDX)
2412 /* If we're moving an SB from a VF to the PF, we need to validate there
2413 * isn't already a line configured for that vector.
2416 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
2417 ECORE_SB_INVALID_IDX)
2421 /* We need to validate that the SB can actually be relocated.
2422 * This would also handle the previous case where we've explicitly
2423 * stated which IGU SB needs to move.
2425 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2427 p_block = &p_info->entry[igu_sb_id];
2429 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2430 !(p_block->status & ECORE_IGU_STATUS_FREE) ||
2431 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
2441 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
2442 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2443 "Failed to find a free SB to move\n");
2447 if (p_block == OSAL_NULL) {
2448 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2449 "SB address (p_block) is NULL\n");
2453 /* At this point, p_block points to the SB we want to relocate */
2455 p_block->status &= ~ECORE_IGU_STATUS_PF;
2457 /* It doesn't matter which VF number we choose, since we're
2458 * going to disable the line, but let's keep it in range.
2460 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
2462 p_block->function_id = (u8)vf_num;
2464 p_block->vector_number = 0;
2466 p_info->usage.cnt--;
2467 p_info->usage.free_cnt--;
2468 p_info->usage.iov_cnt++;
2469 p_info->usage.free_cnt_iov++;
2471 /* TODO - if SBs aren't really the limiting factor,
2472 * then this might not be accurate [in the sense that
2473 * we might not need to decrement the feature].
2475 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
2476 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
2478 p_block->status |= ECORE_IGU_STATUS_PF;
2479 p_block->function_id = p_hwfn->rel_pf_id;
2481 p_block->vector_number = sb_id + 1;
2483 p_info->usage.cnt++;
2484 p_info->usage.free_cnt++;
2485 p_info->usage.iov_cnt--;
2486 p_info->usage.free_cnt_iov--;
2488 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
2489 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
2492 /* Update the IGU and CAU with the new configuration */
2493 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2494 p_block->function_id);
2495 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2496 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2497 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2498 p_block->vector_number);
2500 ecore_wr(p_hwfn, p_ptt,
2501 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
2504 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
2506 p_block->is_pf ? 0 : 1);
2508 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2509 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2510 igu_sb_id, p_block->function_id,
2511 p_block->is_pf, p_block->vector_number);
2513 return ECORE_SUCCESS;
2517 * @brief Initialize igu runtime registers
2521 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2523 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2525 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2528 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2529 IGU_CMD_INT_ACK_BASE)
2530 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2531 IGU_CMD_INT_ACK_BASE)
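/**
 * @brief ecore_int_igu_read_sisr_reg - Reads the 64-bit single-ISR status
 *        as two 32-bit reads through the GTT-mapped IGU command window and
 *        combines them into one value.
 */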
2532 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2534 u32 intr_status_hi = 0, intr_status_lo = 0;
2535 u64 intr_status = 0;
2537 intr_status_lo = REG_RD(p_hwfn,
2538 GTT_BAR0_MAP_REG_IGU_CMD +
2539 LSB_IGU_CMD_ADDR * 8);
2540 intr_status_hi = REG_RD(p_hwfn,
2541 GTT_BAR0_MAP_REG_IGU_CMD +
2542 MSB_IGU_CMD_ADDR * 8);
2543 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2548 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2550 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2551 p_hwfn->b_sp_dpc_enabled = true;
2554 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2556 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2557 if (!p_hwfn->sp_dpc)
2560 return ECORE_SUCCESS;
2563 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2565 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2566 p_hwfn->sp_dpc = OSAL_NULL;
2569 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2570 struct ecore_ptt *p_ptt)
2572 enum _ecore_status_t rc = ECORE_SUCCESS;
2574 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2575 if (rc != ECORE_SUCCESS) {
2576 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2580 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2581 if (rc != ECORE_SUCCESS) {
2582 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2586 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2587 if (rc != ECORE_SUCCESS)
2588 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2593 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2595 ecore_int_sp_sb_free(p_hwfn);
2596 ecore_int_sb_attn_free(p_hwfn);
2597 ecore_int_sp_dpc_free(p_hwfn);
2600 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2602 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2605 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2606 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2607 ecore_int_sp_dpc_setup(p_hwfn);
2610 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2611 struct ecore_sb_cnt_info *p_sb_cnt_info)
2613 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
2615 if (!p_igu_info || !p_sb_cnt_info)
2618 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
2619 sizeof(*p_sb_cnt_info));
2622 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2626 for_each_hwfn(p_dev, i)
2627 p_dev->hwfns[i].b_int_requested = false;
2630 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
2632 p_dev->attn_clr_en = clr_enable;
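/**
 * @brief ecore_int_set_timer_res - Updates the Rx or Tx timer resolution of
 *        a single SB: the CAU entry is DMA-ed from CAU_REG_SB_VAR_MEMORY,
 *        TIMER_RES0 [Rx] or TIMER_RES1 [Tx] is rewritten, and the entry is
 *        DMA-ed back. Requires hw_init to have completed.
 *
 * Illustrative call [a sketch only - deriving timer_res from the requested
 * coalescing value is up to the caller]:
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id,
 *				     false);	[false selects the Rx timer]
 */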
2635 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
2636 struct ecore_ptt *p_ptt,
2637 u8 timer_res, u16 sb_id, bool tx)
2639 struct cau_sb_entry sb_entry;
2640 enum _ecore_status_t rc;
2642 if (!p_hwfn->hw_init_done) {
2643 DP_ERR(p_hwfn, "hardware not initialized yet\n");
2647 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2648 sb_id * sizeof(u64),
2649 (u64)(osal_uintptr_t)&sb_entry, 2,
2650 OSAL_NULL /* default parameters */);
2651 if (rc != ECORE_SUCCESS) {
2652 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2657 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2659 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2661 rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
2662 (u64)(osal_uintptr_t)&sb_entry,
2663 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
2664 OSAL_NULL /* default parameters */);
2665 if (rc != ECORE_SUCCESS) {
2666 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2673 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
2674 struct ecore_ptt *p_ptt,
2675 struct ecore_sb_info *p_sb,
2676 struct ecore_sb_info_dbg *p_info)
2678 u16 sbid = p_sb->igu_sb_id;
2681 if (IS_VF(p_hwfn->p_dev))
2684 if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
2687 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
2688 IGU_REG_PRODUCER_MEMORY + sbid * 4);
2689 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
2690 IGU_REG_CONSUMER_MEM + sbid * 4);
2692 for (i = 0; i < PIS_PER_SB_E4; i++)
2693 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
2695 sbid * 4 * PIS_PER_SB_E4 + i * 4);
2697 return ECORE_SUCCESS;