2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include "ecore_spq.h"
37 #include "ecore_gtt_reg_addr.h"
38 #include "ecore_init_ops.h"
39 #include "ecore_rt_defs.h"
40 #include "ecore_int.h"
43 #include "ecore_sriov.h"
45 #include "ecore_hw_defs.h"
46 #include "ecore_hsi_common.h"
47 #include "ecore_mcp.h"
48 #include "ecore_dbg_fw_funcs.h"
51 /* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor],
52 * and so the functions are lacking the ecore prefix.
53 * If other clients ever need this [or if the non-optional content there
54 * grows], we'll need to re-think this.
56 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
57 struct ecore_ptt *ptt,
59 enum dbg_attn_type attn_type,
61 struct dbg_attn_block_result *results);
63 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
64 struct dbg_attn_block_result *results);
66 const char *dbg_get_status_str(enum dbg_status status);
68 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
69 dbg_read_attn(hwfn, ptt, id, type, clear, results)
70 #define ecore_dbg_parse_attn(hwfn, results) \
71 dbg_parse_attn(hwfn, results)
72 #define ecore_dbg_get_status_str(status) \
73 dbg_get_status_str(status)
76 struct ecore_pi_info {
77 ecore_int_comp_cb_t comp_cb;
78 void *cookie; /* Will be sent to the completion callback function */
81 struct ecore_sb_sp_info {
82 struct ecore_sb_info sb_info;
83 /* per protocol index data */
84 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
87 enum ecore_attention_type {
89 ECORE_ATTN_TYPE_PARITY,
92 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
93 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
95 struct aeu_invert_reg_bit {
98 #define ATTENTION_PARITY (1 << 0)
100 #define ATTENTION_LENGTH_MASK (0x00000ff0)
101 #define ATTENTION_LENGTH_SHIFT (4)
102 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
103 ATTENTION_LENGTH_SHIFT)
104 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
105 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
106 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
109 /* Multiple bits start with this offset */
110 #define ATTENTION_OFFSET_MASK (0x000ff000)
111 #define ATTENTION_OFFSET_SHIFT (12)
113 #define ATTENTION_BB_MASK (0x00700000)
114 #define ATTENTION_BB_SHIFT (20)
115 #define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
116 #define ATTENTION_BB_DIFFERENT (1 << 23)
118 #define ATTENTION_CLEAR_ENABLE (1 << 28)
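/* Worked example (illustrative only): an ATTENTION_PAR_INT source encodes
 * flags = (2 << ATTENTION_LENGTH_SHIFT) | ATTENTION_PARITY = 0x21, so
 * ATTENTION_LENGTH(0x21) = (0x21 & 0xff0) >> 4 = 2, i.e. the source spans
 * two consecutive AEU bits (parity + interrupt).
 */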
121 /* Callback to call if the attention is triggered */
122 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
124 enum block_id block_index;
127 struct aeu_invert_reg {
128 struct aeu_invert_reg_bit bits[32];
131 #define MAX_ATTN_GRPS (8)
132 #define NUM_ATTN_REGS (9)
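/* The AEU exposes NUM_ATTN_REGS (9) 32-bit "after invert" registers, and each
 * source can be routed to one of MAX_ATTN_GRPS (8) attention groups; the
 * deassertion handler below walks this 9x32 bit matrix once per group.
 */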
134 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
136 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
138 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
140 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
143 return ECORE_SUCCESS;
146 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
147 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
148 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
149 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
150 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
151 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
152 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
153 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
154 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
155 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
156 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
167 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
168 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
169 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
170 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
172 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID);
174 /* Disabled VF access */
175 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
178 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
179 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
180 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
181 PSWHST_REG_VF_DISABLED_ERROR_DATA);
182 DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
183 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
184 ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
185 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
186 ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
187 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
188 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
189 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
190 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
191 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
192 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
196 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
197 PSWHST_REG_INCORRECT_ACCESS_VALID);
198 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
199 u32 addr, data, length;
201 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
202 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
203 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
204 PSWHST_REG_INCORRECT_ACCESS_DATA);
205 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
206 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
208 DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
210 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
211 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
212 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
213 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
214 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
215 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
216 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
217 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
218 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
219 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
220 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
221 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
225 /* TODO - We know 'some' of these are legal due to virtualization,
226 * but is it true for all of them?
228 return ECORE_SUCCESS;
231 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
232 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
233 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
234 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
235 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
236 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
237 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
238 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
239 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
240 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
241 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
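/* Note: the address field captured in DATA_0 is in dword units; the handler
 * below shifts it left by 2 to report a byte address (illustrative example:
 * a raw field value of 0xc90 is reported as GRC address 0x3240).
 */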
242 static const char *grc_timeout_attn_master_to_str(u8 master)
245 case 1: return "PXP";
246 case 2: return "MCP";
247 case 3: return "MSDM";
248 case 4: return "PSDM";
249 case 5: return "YSDM";
250 case 6: return "USDM";
251 case 7: return "TSDM";
252 case 8: return "XSDM";
253 case 9: return "DBU";
254 case 10: return "DMAE";
260 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
264 /* We've already cleared the timeout interrupt register, so we learn
265 * of interrupts via the validity register
267 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
268 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
269 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
272 /* Read the GRC timeout information */
273 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
274 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
275 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
276 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
278 DP_NOTICE(p_hwfn->p_dev, false,
279 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
281 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
283 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
284 grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
285 ECORE_GRC_ATTENTION_MASTER_SHIFT),
286 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
287 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
288 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
289 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
290 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
291 ECORE_GRC_ATTENTION_VF_SHIFT);
294 /* Regardless of anything else, clear the validity bit */
295 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
296 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
297 return ECORE_SUCCESS;
300 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
301 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
302 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
303 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
304 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
305 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
306 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
307 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
308 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
309 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
310 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
311 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
312 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
313 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
317 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
318 PGLUE_B_REG_TX_ERR_WR_DETAILS2);
319 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
320 u32 addr_lo, addr_hi, details;
322 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
323 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
324 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
325 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
326 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
327 PGLUE_B_REG_TX_ERR_WR_DETAILS);
329 DP_INFO(p_hwfn, "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
330 addr_hi, addr_lo, details,
331 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
332 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
333 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
335 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
336 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
337 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
340 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
341 PGLUE_B_REG_TX_ERR_RD_DETAILS2);
342 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
343 u32 addr_lo, addr_hi, details;
345 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
346 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
347 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
348 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
349 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
350 PGLUE_B_REG_TX_ERR_RD_DETAILS);
352 DP_INFO(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
353 addr_hi, addr_lo, details,
354 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
355 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
356 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
358 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
359 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
360 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
363 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
364 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
365 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
366 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
368 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
369 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
370 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
371 u32 addr_hi, addr_lo;
373 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
374 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
375 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
376 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
378 DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
379 tmp, addr_hi, addr_lo);
382 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
383 PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
384 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
385 u32 addr_hi, addr_lo, details;
387 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
388 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
389 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
390 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
391 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
392 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
394 DP_INFO(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
395 details, tmp, addr_hi, addr_lo);
398 /* Clear the indications */
399 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
400 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
402 return ECORE_SUCCESS;
405 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
407 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
409 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
414 static enum _ecore_status_t
415 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
417 DP_INFO(p_hwfn, "General attention 35!\n");
419 return ECORE_SUCCESS;
422 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
423 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
424 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
425 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
426 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
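/* The _MASK/_SHIFT pairs above are consumed via GET_FIELD() when decoding the
 * drop details below; note the SIZE field counts 32-bit words, which is why
 * the handler multiplies it by 4 to report the size in bytes.
 */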
428 #define ECORE_DB_REC_COUNT 10
429 #define ECORE_DB_REC_INTERVAL 100
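/* With these defaults, the recovery flow below polls DORQ_REG_PF_USAGE_CNT up
 * to ECORE_DB_REC_COUNT times, ECORE_DB_REC_INTERVAL usec apart, i.e. it
 * waits at most 10 * 100 = 1000 usec for the doorbell usage count to drain.
 */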
431 /* assumes sticky overflow indication was set for this PF */
432 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
433 struct ecore_ptt *p_ptt)
435 u8 count = ECORE_DB_REC_COUNT;
438 /* Wait for the usage count to reach zero or for the retry count to run
439 * out. This is necessary since EDPM doorbell transactions can take
440 * multiple 64b cycles and may therefore "split" over the PCI. A doorbell
441 * drop can thus happen with half an EDPM in the queue and the other half
442 * dropped. Another EDPM doorbell to the same address (from the doorbell
443 * recovery mechanism or from the doorbelling entity) could have its first
444 * half dropped and its second half interpreted as a continuation of the
445 * first. To prevent such malformed doorbells from reaching the device,
446 * flush the queue before releasing the overflow sticky indication.
448 while (count-- && usage) {
449 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
450 OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
453 /* should have been depleted by now */
455 DP_NOTICE(p_hwfn->p_dev, false,
456 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
457 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
458 return ECORE_TIMEOUT;
461 /* flush any pending (e)dpm as they may never arrive */
462 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
464 /* release overflow sticky indication (stop silently dropping everything) */
465 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
467 /* repeat all last doorbells (doorbell drop recovery) */
468 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
470 return ECORE_SUCCESS;
473 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
475 u32 int_sts, first_drop_reason, details, address, overflow,
477 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
478 enum _ecore_status_t rc;
480 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
481 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
484 /* check if db_drop or overflow happened */
485 if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
486 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
488 /* obtain data about db drop/overflow */
489 first_drop_reason = ecore_rd(p_hwfn, p_ptt,
490 DORQ_REG_DB_DROP_REASON) &
491 ECORE_DORQ_ATTENTION_REASON_MASK;
492 details = ecore_rd(p_hwfn, p_ptt,
493 DORQ_REG_DB_DROP_DETAILS);
494 address = ecore_rd(p_hwfn, p_ptt,
495 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
496 overflow = ecore_rd(p_hwfn, p_ptt,
497 DORQ_REG_PF_OVFL_STICKY);
498 all_drops_reason = ecore_rd(p_hwfn, p_ptt,
499 DORQ_REG_DB_DROP_DETAILS_REASON);
502 DP_NOTICE(p_hwfn->p_dev, false,
503 "Doorbell drop occurred\n"
504 "Address\t\t0x%08x\t(second BAR address)\n"
505 "FID\t\t0x%04x\t\t(Opaque FID)\n"
506 "Size\t\t0x%04x\t\t(in bytes)\n"
507 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
508 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
509 "Overflow\t0x%x\t\t(a per PF indication)\n",
510 address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
511 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
512 first_drop_reason, all_drops_reason, overflow);
514 /* if this PF caused overflow, initiate recovery */
516 rc = ecore_db_rec_attn(p_hwfn, p_ptt);
517 if (rc != ECORE_SUCCESS)
521 /* clear the doorbell drop details and prepare for next drop */
522 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
524 /* mark the interrupt as handled (note: even if the drop was due to a
525 * different reason than overflow, we mark it as handled)
527 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
528 DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
530 /* if there are no indications other than the drop indications, success */
531 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
532 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
533 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
534 return ECORE_SUCCESS;
537 /* some other indication was present - non-recoverable */
538 DP_INFO(p_hwfn, "DORQ fatal attention\n");
543 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
546 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
547 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
550 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
551 TM_REG_INT_STS_1_PEND_CONN_SCAN))
554 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
555 TM_REG_INT_STS_1_PEND_CONN_SCAN))
556 DP_INFO(p_hwfn, "TM attention on emulation - most likely a result of clock ratios\n");
557 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
558 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
559 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
560 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
562 return ECORE_SUCCESS;
569 /* Instead of major changes to the data-structure, we have some 'special'
570 * identifiers for sources whose meaning changed between adapters.
572 enum aeu_invert_reg_special_type {
573 AEU_INVERT_REG_SPECIAL_CNIG_0,
574 AEU_INVERT_REG_SPECIAL_CNIG_1,
575 AEU_INVERT_REG_SPECIAL_CNIG_2,
576 AEU_INVERT_REG_SPECIAL_CNIG_3,
577 AEU_INVERT_REG_SPECIAL_MAX,
580 static struct aeu_invert_reg_bit
581 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
582 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
583 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
584 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
585 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
588 /* Note: aeu_invert_reg must be defined in the same bit order as the HW; */
589 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
592 { /* After Invert 1 */
593 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
598 { /* After Invert 2 */
599 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
600 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
601 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
602 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
603 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
604 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
605 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
606 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
607 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS},
612 { /* After Invert 3 */
613 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
618 { /* After Invert 4 */
619 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID},
620 {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
621 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID},
622 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
623 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS},
624 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
625 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS},
626 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
627 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM},
628 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
629 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM},
630 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
631 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
632 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
633 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
634 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
635 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
636 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
637 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
638 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
639 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
640 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
641 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
646 { /* After Invert 5 */
647 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
648 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
649 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
650 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
651 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
652 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
653 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
654 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
655 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
656 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
657 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
658 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
659 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
660 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
661 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
662 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
667 { /* After Invert 6 */
668 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
669 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
670 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
671 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
672 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
673 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
674 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
675 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
676 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
677 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
678 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
679 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
680 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
681 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
682 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
683 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
688 { /* After Invert 7 */
689 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
690 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
691 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
692 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
693 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
694 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
695 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
696 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
697 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
698 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
699 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
700 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
701 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
702 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
703 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
704 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
705 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
710 { /* After Invert 8 */
711 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
712 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
713 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
714 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
715 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
716 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
717 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
718 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
719 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
720 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
721 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
722 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
723 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
724 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
725 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
726 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
727 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
728 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
729 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
730 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
731 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
736 { /* After Invert 9 */
737 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
738 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
739 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
740 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
741 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
747 static struct aeu_invert_reg_bit *
748 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
749 struct aeu_invert_reg_bit *p_bit)
751 if (!ECORE_IS_BB(p_hwfn->p_dev))
754 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
757 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
761 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
762 struct aeu_invert_reg_bit *p_bit)
764 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
768 #define ATTN_STATE_BITS (0xfff)
769 #define ATTN_BITS_MASKABLE (0x3ff)
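/* Of the 12 attention state bits (0xfff), only the lower 10 (0x3ff) may be
 * masked in the IGU. Bit 8 (0x100) carries the MFW event indication, which
 * ecore_int_assertion() and ecore_int_attentions() below treat specially.
 */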
770 struct ecore_sb_attn_info {
771 /* Virtual & Physical address of the SB */
772 struct atten_status_block *sb_attn;
775 /* Last seen running index */
778 /* A mask of the AEU bits resulting in a parity error */
779 u32 parity_mask[NUM_ATTN_REGS];
781 /* A pointer to the attention description structure */
782 struct aeu_invert_reg *p_aeu_desc;
784 /* Previously asserted attentions, which are still unasserted */
787 /* Cleanup address for the link's general hw attention */
791 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
792 struct ecore_sb_attn_info *p_sb_desc)
796 OSAL_MMIOWB(p_hwfn->p_dev);
798 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
799 if (p_sb_desc->index != index) {
800 p_sb_desc->index = index;
801 rc = ECORE_SB_ATT_IDX;
804 OSAL_MMIOWB(p_hwfn->p_dev);
810 * @brief ecore_int_assertion - handles asserted attention bits
813 * @param asserted_bits newly asserted bits
814 * @return enum _ecore_status_t
816 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
819 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
822 /* Mask the source of the attention in the IGU */
823 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
824 IGU_REG_ATTENTION_ENABLE);
825 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
826 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
827 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
828 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
830 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
831 "inner known ATTN state: 0x%04x --> 0x%04x\n",
832 sb_attn_sw->known_attn,
833 sb_attn_sw->known_attn | asserted_bits);
834 sb_attn_sw->known_attn |= asserted_bits;
836 /* Handle MCP events */
837 if (asserted_bits & 0x100) {
838 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
839 /* Clean the MCP attention */
840 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
841 sb_attn_sw->mfw_attn_addr, 0);
844 /* FIXME - this will change once we have GOOD gtt definitions */
845 DIRECT_REG_WR(p_hwfn,
846 (u8 OSAL_IOMEM*)p_hwfn->regview +
847 GTT_BAR0_MAP_REG_IGU_CMD +
848 ((IGU_CMD_ATTN_BIT_SET_UPPER -
849 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
851 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
854 return ECORE_SUCCESS;
857 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
858 enum block_id id, enum dbg_attn_type type,
861 struct dbg_attn_block_result attn_results;
862 enum dbg_status status;
864 OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));
866 status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
867 b_clear, &attn_results);
869 if (status != DBG_STATUS_OK)
870 DP_NOTICE(p_hwfn, true,
871 "Failed to parse attention information [status: %s]\n",
872 ecore_dbg_get_status_str(status));
874 ecore_dbg_parse_attn(p_hwfn, &attn_results);
876 if (status != DBG_STATUS_OK)
877 DP_NOTICE(p_hwfn, true,
878 "Failed to parse attention information [status: %d]\n",
881 ecore_dbg_print_attn(p_hwfn, &attn_results);
886 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
887 * cause of the attention
890 * @param p_aeu - descriptor of an AEU bit which caused the attention
891 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
892 * this bit to this group.
893 * @param bit_index - index of this bit in the aeu_en_reg
895 * @return enum _ecore_status_t
897 static enum _ecore_status_t
898 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
899 struct aeu_invert_reg_bit *p_aeu,
901 const char *p_bit_name,
904 enum _ecore_status_t rc = ECORE_INVAL;
905 bool b_fatal = false;
907 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
908 p_bit_name, bitmask);
910 /* Call callback before clearing the interrupt status */
912 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
914 rc = p_aeu->cb(p_hwfn);
917 if (rc != ECORE_SUCCESS)
920 /* Print HW block interrupt registers */
921 if (p_aeu->block_index != MAX_BLOCK_ID)
922 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
923 ATTN_TYPE_INTERRUPT, !b_fatal);
925 /* Reach assertion if attention is fatal */
927 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
930 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
933 /* Prevent this Attention from being asserted in the future */
934 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
935 p_hwfn->p_dev->attn_clr_en) {
938 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
939 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
940 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
948 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
951 * @param p_aeu - descriptor of an AEU bit which caused the parity
952 * @param aeu_en_reg - address of the AEU enable register
955 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
956 struct aeu_invert_reg_bit *p_aeu,
957 u32 aeu_en_reg, u8 bit_index)
959 u32 block_id = p_aeu->block_index, mask, val;
961 DP_NOTICE(p_hwfn->p_dev, false,
962 "%s parity attention is set [address 0x%08x, bit %d]\n",
963 p_aeu->bit_name, aeu_en_reg, bit_index);
965 if (block_id == MAX_BLOCK_ID)
968 ecore_int_attn_print(p_hwfn, block_id,
969 ATTN_TYPE_PARITY, false);
971 /* In A0, there's a single parity bit for several blocks */
972 if (block_id == BLOCK_BTB) {
973 ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
974 ATTN_TYPE_PARITY, false);
975 ecore_int_attn_print(p_hwfn, BLOCK_MCP,
976 ATTN_TYPE_PARITY, false);
979 /* Prevent this parity error from being re-asserted */
980 mask = ~(0x1 << bit_index);
981 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
982 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
983 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
988 * @brief - handles deassertion of previously asserted attentions.
991 * @param deasserted_bits - newly deasserted bits
992 * @return enum _ecore_status_t
995 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
998 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
999 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1000 u8 i, j, k, bit_idx;
1001 enum _ecore_status_t rc = ECORE_SUCCESS;
1003 /* Read the attention registers in the AEU */
1004 for (i = 0; i < NUM_ATTN_REGS; i++) {
1005 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1006 MISC_REG_AEU_AFTER_INVERT_1_IGU +
1008 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1009 "Deasserted bits [%d]: %08x\n",
1013 /* Handle parity attentions first */
1014 for (i = 0; i < NUM_ATTN_REGS; i++)
1016 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1019 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1020 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1021 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1023 /* Skip register in which no parity bit is currently set */
1027 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1028 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1030 if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
1031 !!(parities & (1 << bit_idx)))
1032 ecore_int_deassertion_parity(p_hwfn, p_bit,
1035 bit_idx += ATTENTION_LENGTH(p_bit->flags);
1039 /* Find non-parity cause for attention and act */
1040 for (k = 0; k < MAX_ATTN_GRPS; k++) {
1041 struct aeu_invert_reg_bit *p_aeu;
1043 /* Handle only groups whose attention is currently deasserted */
1044 if (!(deasserted_bits & (1 << k)))
1047 for (i = 0; i < NUM_ATTN_REGS; i++) {
1050 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1052 k * sizeof(u32) * NUM_ATTN_REGS;
1053 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1054 bits = aeu_inv_arr[i] & en;
1056 /* Skip if no bit from this group is currently set */
1060 /* Find all set bits from current register which belong
1061 * to current group, making them responsible for the
1062 * previous assertion.
1064 for (j = 0, bit_idx = 0; bit_idx < 32; j++)
1066 long unsigned int bitmask;
1069 /* Need to account for bits whose meaning changed */
1070 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1071 p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);
1074 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1075 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
1081 /* Find the bits relating to the HW block, then
1082 * shift them so they become the LSBs.
1084 bitmask = bits & (((1 << bit_len) - 1) << bit);
1088 u32 flags = p_aeu->flags;
1092 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
1095 /* Some bits represent more than a
1096 * single interrupt. Correctly print
1099 if (ATTENTION_LENGTH(flags) > 2 ||
1100 ((flags & ATTENTION_PAR_INT) &&
1101 ATTENTION_LENGTH(flags) > 1))
1102 OSAL_SNPRINTF(bit_name, 30,
1106 OSAL_STRNCPY(bit_name,
1110 /* We now need to pass bitmask in its
1115 /* Handle source of the attention */
1116 ecore_int_deassertion_aeu_bit(p_hwfn,
1123 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1128 /* Clear IGU indication for the deasserted bits */
1129 /* FIXME - this will change once we have GOOD gtt definitions */
1130 DIRECT_REG_WR(p_hwfn,
1131 (u8 OSAL_IOMEM*)p_hwfn->regview +
1132 GTT_BAR0_MAP_REG_IGU_CMD +
1133 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1134 IGU_CMD_INT_ACK_BASE) << 3),
1135 ~((u32)deasserted_bits));
1137 /* Unmask deasserted attentions in IGU */
1138 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1139 IGU_REG_ATTENTION_ENABLE);
1140 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1141 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1143 /* Clear deassertion from inner state */
1144 sb_attn_sw->known_attn &= ~deasserted_bits;
1149 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1151 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1152 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1153 u16 index = 0, asserted_bits, deasserted_bits;
1154 u32 attn_bits = 0, attn_acks = 0;
1155 enum _ecore_status_t rc = ECORE_SUCCESS;
1157 /* Read current attention bits/acks - safeguard against attentions
1158 * by guaranteeing work on a synchronized timeframe
1161 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1162 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1163 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1164 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1165 p_sb_attn->sb_index = index;
1167 /* Attention / Deassertion are meaningful (and in the correct state)
1168 * only when they differ and are consistent with the known state -
1169 * deassertion when there was a previous attention & a current ack, and
1170 * assertion when there is a current attention with no previous attention
1172 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1173 ~p_sb_attn_sw->known_attn;
1174 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1175 p_sb_attn_sw->known_attn;
1177 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1179 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1180 index, attn_bits, attn_acks, asserted_bits,
1181 deasserted_bits, p_sb_attn_sw->known_attn);
1182 else if (asserted_bits == 0x100)
1184 "MFW indication via attention\n");
1186 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1187 "MFW indication [deassertion]\n");
1189 if (asserted_bits) {
1190 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1195 if (deasserted_bits)
1196 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1201 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1202 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1204 struct igu_prod_cons_update igu_ack = { 0 };
1206 igu_ack.sb_id_and_flags =
1207 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1208 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1209 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1210 (IGU_SEG_ACCESS_ATTN <<
1211 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
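/* The command written below thus carries the attention consumer index in
 * SB_INDEX, sets UPDATE_FLAG, leaves the interrupt state untouched
 * (IGU_INT_NOP) and selects the attention segment of this SB.
 */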
1213 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1215 /* Both segments (interrupts & acks) are written to the same address;
1216 * we need to guarantee all commands are received (in order) by the HW.
1218 OSAL_MMIOWB(p_hwfn->p_dev);
1219 OSAL_BARRIER(p_hwfn->p_dev);
1222 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1224 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1225 struct ecore_pi_info *pi_info = OSAL_NULL;
1226 struct ecore_sb_attn_info *sb_attn;
1227 struct ecore_sb_info *sb_info;
1234 if (!p_hwfn->p_sp_sb) {
1235 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1239 sb_info = &p_hwfn->p_sp_sb->sb_info;
1240 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1242 DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
1246 if (!p_hwfn->p_sb_attn) {
1247 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1250 sb_attn = p_hwfn->p_sb_attn;
1252 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1253 p_hwfn, p_hwfn->my_id);
1255 /* Disable ack for def status block. Required both for msix and for
1256 * inta in non-mask mode; in inta it does no harm.
1258 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1260 /* Gather Interrupts/Attentions information */
1261 if (!sb_info->sb_virt) {
1262 DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1264 u32 tmp_index = sb_info->sb_ack;
1265 rc = ecore_sb_update_sb_idx(sb_info);
1266 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1267 "Interrupt indices: 0x%08x --> 0x%08x\n",
1268 tmp_index, sb_info->sb_ack);
1271 if (!sb_attn || !sb_attn->sb_attn) {
1272 DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
1274 u16 tmp_index = sb_attn->index;
1276 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1277 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1278 "Attention indices: 0x%08x --> 0x%08x\n",
1279 tmp_index, sb_attn->index);
1282 /* Check if we expect interrupts at this time; if not, just ack them */
1283 if (!(rc & ECORE_SB_EVENT_MASK)) {
1284 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1288 /* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
1289 if (!p_hwfn->p_dpc_ptt) {
1290 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1291 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1295 if (rc & ECORE_SB_ATT_IDX)
1296 ecore_int_attentions(p_hwfn);
1298 if (rc & ECORE_SB_IDX) {
1301 /* Since we only looked at the SB index, it's possible more
1302 * than a single protocol-index on the SB incremented.
1303 * Iterate over all configured protocol indices and check
1304 * whether something happened for each.
1306 for (pi = 0; pi < arr_size; pi++) {
1307 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1308 if (pi_info->comp_cb != OSAL_NULL)
1309 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1313 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1314 /* This should be done before the interrupts are enabled,
1315 * since otherwise a new attention will be generated.
1317 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1320 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1323 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1325 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1330 if (p_sb->sb_attn) {
1331 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1333 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1336 OSAL_FREE(p_hwfn->p_dev, p_sb);
1337 p_hwfn->p_sb_attn = OSAL_NULL;
1340 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1341 struct ecore_ptt *p_ptt)
1343 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1345 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1348 sb_info->known_attn = 0;
1350 /* Configure Attention Status Block in IGU */
1351 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1352 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1353 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1354 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1357 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1358 struct ecore_ptt *p_ptt,
1360 dma_addr_t sb_phy_addr)
1362 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1365 sb_info->sb_attn = sb_virt_addr;
1366 sb_info->sb_phys = sb_phy_addr;
1368 /* Set the pointer to the AEU descriptors */
1369 sb_info->p_aeu_desc = aeu_descs;
1371 /* Calculate Parity Masks */
1372 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1373 for (i = 0; i < NUM_ATTN_REGS; i++) {
1374 /* j is array index, k is bit index */
1375 for (j = 0, k = 0; k < 32; j++) {
1376 struct aeu_invert_reg_bit *p_aeu;
1378 p_aeu = &aeu_descs[i].bits[j];
1379 if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1380 sb_info->parity_mask[i] |= 1 << k;
1382 k += ATTENTION_LENGTH(p_aeu->flags);
1384 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1385 "Attn Mask [Reg %d]: 0x%08x\n",
1386 i, sb_info->parity_mask[i]);
1389 /* Set the address of cleanup for the mcp attention */
1390 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1391 MISC_REG_AEU_GENERAL_ATTN_0;
1393 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1396 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1397 struct ecore_ptt *p_ptt)
1399 struct ecore_dev *p_dev = p_hwfn->p_dev;
1400 struct ecore_sb_attn_info *p_sb;
1401 dma_addr_t p_phys = 0;
1405 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1407 DP_NOTICE(p_dev, true, "Failed to allocate `struct ecore_sb_attn_info'\n");
1412 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1413 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1415 DP_NOTICE(p_dev, true, "Failed to allocate status block (attentions)\n");
1416 OSAL_FREE(p_dev, p_sb);
1420 /* Attention setup */
1421 p_hwfn->p_sb_attn = p_sb;
1422 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1424 return ECORE_SUCCESS;
1427 /* coalescing timeout = timeset << (timer_res + 1) */
1428 #define ECORE_CAU_DEF_RX_USECS 24
1429 #define ECORE_CAU_DEF_TX_USECS 48
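/* Worked example (illustrative): a requested coalescing of 384 usec does not
 * fit the 7-bit timeset, so the code below picks timer_res = 2 and
 * timeset = 384 >> 2 = 96; per the formula above, the HW then waits
 * 96 << (2 + 1) = 768 timer units before generating the interrupt.
 */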
1431 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1432 struct cau_sb_entry *p_sb_entry,
1433 u8 pf_id, u16 vf_number, u8 vf_valid)
1435 struct ecore_dev *p_dev = p_hwfn->p_dev;
1439 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1441 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1442 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1443 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1444 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1445 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1447 cau_state = CAU_HC_DISABLE_STATE;
1449 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1450 cau_state = CAU_HC_ENABLE_STATE;
1451 if (!p_dev->rx_coalesce_usecs)
1452 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1453 if (!p_dev->tx_coalesce_usecs)
1454 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1457 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1458 if (p_dev->rx_coalesce_usecs <= 0x7F)
1460 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1464 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1466 if (p_dev->tx_coalesce_usecs <= 0x7F)
1468 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1472 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1474 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1475 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1478 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1479 struct ecore_ptt *p_ptt,
1480 u16 igu_sb_id, u32 pi_index,
1481 enum ecore_coalescing_fsm coalescing_fsm,
1484 struct cau_pi_entry pi_entry;
1485 u32 sb_offset, pi_offset;
1487 if (IS_VF(p_hwfn->p_dev))
1488 return;/* @@@TBD MichalK- VF CAU... */
1490 sb_offset = igu_sb_id * PIS_PER_SB_E4;
1491 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1493 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1494 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1495 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1497 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1499 pi_offset = sb_offset + pi_index;
1500 if (p_hwfn->hw_init_done) {
1501 ecore_wr(p_hwfn, p_ptt,
1502 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1503 *((u32 *)&(pi_entry)));
1505 STORE_RT_REG(p_hwfn,
1506 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1507 *((u32 *)&(pi_entry)));
1511 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1512 struct ecore_ptt *p_ptt,
1513 struct ecore_sb_info *p_sb, u32 pi_index,
1514 enum ecore_coalescing_fsm coalescing_fsm,
1517 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1518 pi_index, coalescing_fsm, timeset);
1521 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1522 struct ecore_ptt *p_ptt,
1523 dma_addr_t sb_phys, u16 igu_sb_id,
1524 u16 vf_number, u8 vf_valid)
1526 struct cau_sb_entry sb_entry;
1528 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1529 vf_number, vf_valid);
1531 if (p_hwfn->hw_init_done) {
1532 /* Wide-bus, initialize via DMAE */
1533 u64 phys_addr = (u64)sb_phys;
1535 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
1536 CAU_REG_SB_ADDR_MEMORY +
1537 igu_sb_id * sizeof(u64), 2, 0);
1538 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
1539 CAU_REG_SB_VAR_MEMORY +
1540 igu_sb_id * sizeof(u64), 2, 0);
1542 /* Initialize Status Block Address */
1543 STORE_RT_REG_AGG(p_hwfn,
1544 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2,
1547 STORE_RT_REG_AGG(p_hwfn,
1548 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2,
1552 /* Configure pi coalescing if set */
1553 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1554 /* eth will open queues for all tcs, so configure all of them
1555 * properly, rather than just the active ones
1557 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1559 u8 timeset, timer_res;
1562 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1563 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1565 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1569 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1570 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1571 ECORE_COAL_RX_STATE_MACHINE,
1574 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1576 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1580 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1581 for (i = 0; i < num_tc; i++) {
1582 _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1583 igu_sb_id, TX_PI(i),
1584 ECORE_COAL_TX_STATE_MACHINE,
1590 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1591 struct ecore_ptt *p_ptt,
1592 struct ecore_sb_info *sb_info)
1594 /* zero status block and ack counter */
1595 sb_info->sb_ack = 0;
1596 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1598 if (IS_PF(p_hwfn->p_dev))
1599 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1600 sb_info->igu_sb_id, 0, 0);
1603 struct ecore_igu_block *
1604 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1606 struct ecore_igu_block *p_block;
1609 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1611 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1613 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1614 !(p_block->status & ECORE_IGU_STATUS_FREE))
1617 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1625 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1628 struct ecore_igu_block *p_block;
1631 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1633 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1635 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1637 p_block->vector_number != vector_id)
1643 return ECORE_SB_INVALID_IDX;
1646 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1650 /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1651 if (sb_id == ECORE_SP_SB_ID)
1652 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1653 else if (IS_PF(p_hwfn->p_dev))
1654 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1656 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1658 if (igu_sb_id == ECORE_SB_INVALID_IDX)
1659 DP_NOTICE(p_hwfn, true,
1660 "Slowpath SB vector %04x doesn't exist\n",
1662 else if (sb_id == ECORE_SP_SB_ID)
1663 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1664 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1666 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1667 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1672 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1673 struct ecore_ptt *p_ptt,
1674 struct ecore_sb_info *sb_info,
1676 dma_addr_t sb_phy_addr,
1679 sb_info->sb_virt = sb_virt_addr;
1680 sb_info->sb_phys = sb_phy_addr;
1682 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1684 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1687 /* Let the igu info reference the client's SB info */
1688 if (sb_id != ECORE_SP_SB_ID) {
1689 if (IS_PF(p_hwfn->p_dev)) {
1690 struct ecore_igu_info *p_info;
1691 struct ecore_igu_block *p_block;
1693 p_info = p_hwfn->hw_info.p_igu_info;
1694 p_block = &p_info->entry[sb_info->igu_sb_id];
1696 p_block->sb_info = sb_info;
1697 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1698 p_info->usage.free_cnt--;
1700 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1704 #ifdef ECORE_CONFIG_DIRECT_HWFN
1705 sb_info->p_hwfn = p_hwfn;
1707 sb_info->p_dev = p_hwfn->p_dev;
1709 /* The igu address will hold the absolute address that needs to be
1710 * written to for a specific status block
1712 if (IS_PF(p_hwfn->p_dev)) {
1713 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview +
1714 GTT_BAR0_MAP_REG_IGU_CMD +
1715 (sb_info->igu_sb_id << 3);
1719 (u8 OSAL_IOMEM*)p_hwfn->regview +
1720 PXP_VF_BAR0_START_IGU +
1721 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1724 sb_info->flags |= ECORE_SB_INFO_INIT;
1726 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1728 return ECORE_SUCCESS;
1731 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1732 struct ecore_sb_info *sb_info,
1735 struct ecore_igu_info *p_info;
1736 struct ecore_igu_block *p_block;
1738 if (sb_info == OSAL_NULL)
1739 return ECORE_SUCCESS;
1741 /* zero status block and ack counter */
1742 sb_info->sb_ack = 0;
1743 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1745 if (IS_VF(p_hwfn->p_dev)) {
1746 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1747 return ECORE_SUCCESS;
1750 p_info = p_hwfn->hw_info.p_igu_info;
1751 p_block = &p_info->entry[sb_info->igu_sb_id];
1753 /* Vector 0 is reserved to Default SB */
1754 if (p_block->vector_number == 0) {
1755 DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
1759 /* Lose reference to client's SB info, and fix counters */
1760 p_block->sb_info = OSAL_NULL;
1761 p_block->status |= ECORE_IGU_STATUS_FREE;
1762 p_info->usage.free_cnt++;
1764 return ECORE_SUCCESS;
1767 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1769 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1774 if (p_sb->sb_info.sb_virt) {
1775 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1776 p_sb->sb_info.sb_virt,
1777 p_sb->sb_info.sb_phys,
1778 SB_ALIGNED_SIZE(p_hwfn));
1781 OSAL_FREE(p_hwfn->p_dev, p_sb);
1782 p_hwfn->p_sp_sb = OSAL_NULL;
1785 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1786 struct ecore_ptt *p_ptt)
1788 struct ecore_sb_sp_info *p_sb;
1789 dma_addr_t p_phys = 0;
1793 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1795 DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sb_sp_info'\n");
1800 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1802 SB_ALIGNED_SIZE(p_hwfn));
1804 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1805 OSAL_FREE(p_hwfn->p_dev, p_sb);
1810 /* Status Block setup */
1811 p_hwfn->p_sp_sb = p_sb;
1812 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1813 p_virt, p_phys, ECORE_SP_SB_ID);
1815 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1817 return ECORE_SUCCESS;
enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
					   ecore_int_comp_cb_t comp_cb,
					   void *cookie,
					   u8 *sb_idx,
					   __le16 **p_fw_cons)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	enum _ecore_status_t rc = ECORE_NOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = ECORE_SUCCESS;
		break;
	}

	return rc;
}
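
/* Example (a sketch; callback and cookie names are illustrative only):
 * a slowpath client registers a completion callback and receives both the
 * protocol index and a pointer to the firmware consumer it should track -
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				   &sb_idx, &p_fw_cons);
 *
 * ECORE_NOMEM is returned once all PIS_PER_SB_E4 indices are taken.
 */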
enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
					     u8 pi)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
		return ECORE_NOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;

	return ECORE_SUCCESS;
}
u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
	}
#endif

	p_hwfn->p_dev->int_mode = int_mode;
	switch (p_hwfn->p_dev->int_mode) {
	case ECORE_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;

	case ECORE_INT_MODE_POLL:
		break;
	}

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
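
/* Note on the switch above: e.g., in MSI-X mode the value written is
 * FUNC_EN | ATTN_BIT_EN | MSI_MSIX_EN [no SINGLE_ISR, as each SB gets its
 * own vector], while INTa instead adds INT_LINE_EN and SINGLE_ISR_EN.
 */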
static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     enum ecore_int_mode int_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 tmp;

	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
	 * attentions. Since we're waiting for BRCM answer regarding this
	 * attention, in the meanwhile we simply mask it.
	 */
	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
	tmp &= ~0x800;
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);

	ecore_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
			return ECORE_NORESOURCES;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	p_hwfn->b_int_enabled = 1;

	return rc;
}
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->p_dev))
		return;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH	(1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u16 igu_sb_id,
				     bool cleanup_set,
				     u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u8 type = 0; /* FIXME MichalS type??? */

	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* USE Control Command Register to perform cleanup. There is an
	 * option to do this using IGU bar, but then it can't be used for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
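
	/* Worked example: for igu_sb_id 37 [and type 0], sb_bit = 1 << 5 =
	 * 0x20 and sb_bit_addr = IGU_REG_CLEANUP_STATUS_0 + 4, i.e. bit 5
	 * of the second 32-bit cleanup-status word.
	 */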
	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct ecore_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = ecore_rd(p_hwfn, p_ptt,
			       IGU_REG_WRITE_DONE_PENDING +
			       ((igu_sb_id / 32) * 4));
		if (val & (1 << (igu_sb_id % 32)))
			OSAL_UDELAY(10);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn, true,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
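
/* Note: CAU PI memory holds 12 producer indices per SB [matching
 * PIS_PER_SB_E4], which is why the loop above zeroes exactly 12 entries
 * for the given igu_sb_id.
 */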
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}
int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs as later driver wouldn't be
		 * able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW have provided -
		 * don't forget MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;
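
	/* Example of the division done below [values illustrative]: with
	 * usage.cnt = 4 and usage.iov_cnt = 2, the DSB keeps vector 0, the
	 * next four valid entries become PF SBs with vector_number 1..4
	 * [usage.cnt - pf_sbs after the decrement], and the following two
	 * are handed to the first VFs of this PF with vector_number 0.
	 */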
	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}
int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * The reset expects the !orig to reflect the initial status of the
	 * SBs, and would re-calculate the originals based on those.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;
	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}
static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val,
					 IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val,
					   IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
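
/* Each IGU CAM line is a single 32-bit word packing the owning function
 * number, a PF/VF indication, the vector number and a valid bit - the
 * same IGU_MAPPING_LINE_* fields written back by ecore_int_igu_reset_cam().
 */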
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the First entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit number
		 * of prints by having each PF print only its entries with the
		 * exception of PF0 which would print everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}

	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block = OSAL_NULL;
	u16 igu_sb_id = 0, vf_num = 0;
	u32 val = 0;

	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
		return ECORE_INVAL;

	if (sb_id == ECORE_SP_SB_ID)
		return ECORE_INVAL;

	if (!p_info->b_allow_pf_vf_change) {
		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
		return ECORE_INVAL;
	}

	/* If we're moving a SB from PF to VF, the client had to specify
	 * which vector it wants to move.
	 */
	if (b_to_vf) {
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
		if (igu_sb_id == ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* If we're moving a SB from VF to PF, need to validate there isn't
	 * already a line configured for that vector.
	 */
	if (!b_to_vf) {
		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
		    ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* We need to validate that the SB can actually be relocated.
	 * This would also handle the previous case where we've explicitly
	 * stated which IGU SB needs to move.
	 */
	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
			if (b_to_vf)
				return ECORE_INVAL;
			else
				continue;
		}

		break;
	}

	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
			   "Failed to find a free SB to move\n");
		return ECORE_INVAL;
	}

	/* At this point, p_block points to the SB we want to relocate */
	if (b_to_vf) {
		p_block->status &= ~ECORE_IGU_STATUS_PF;

		/* It doesn't matter which VF number we choose, since we're
		 * going to disable the line; But let's keep it in range.
		 */
		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

		p_block->function_id = (u8)vf_num;
		p_block->is_pf = 0;
		p_block->vector_number = 0;

		p_info->usage.cnt--;
		p_info->usage.free_cnt--;
		p_info->usage.iov_cnt++;
		p_info->usage.free_cnt_iov++;

		/* TODO - if SBs aren't really the limiting factor,
		 * then it might not be accurate [in the sense that
		 * we might not need to decrement the feature].
		 */
		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
	} else {
		p_block->status |= ECORE_IGU_STATUS_PF;
		p_block->function_id = p_hwfn->rel_pf_id;
		p_block->is_pf = 1;
		p_block->vector_number = sb_id + 1;

		p_info->usage.cnt++;
		p_info->usage.free_cnt++;
		p_info->usage.iov_cnt--;
		p_info->usage.free_cnt_iov--;

		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
	}

	/* Update the IGU and CAU with the new configuration */
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
		  p_block->function_id);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
		  p_block->vector_number);

	ecore_wr(p_hwfn, p_ptt,
		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
		 val);

	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
			      igu_sb_id, vf_num,
			      p_block->is_pf ? 0 : 1);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
		   igu_sb_id, p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	return ECORE_SUCCESS;
}
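
/* Example [a sketch]: to donate PF vector 3 to the VF pool -
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, true);
 *
 * and to later reclaim that vector for the PF -
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, false);
 *
 * Both directions require a new-enough MFW [b_allow_pf_vf_change].
 */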
/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
{
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				LSB_IGU_CMD_ADDR * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				MSB_IGU_CMD_ADDR * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
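
/* The two 32-bit SISR reads are combined into a single 64-bit mask: the
 * low word covers interrupt sources 0-31 and the high word sources 32-63,
 * so bit n of the result corresponds to source n.
 */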
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
	if (!p_hwfn->sp_dpc)
		return ECORE_NOMEM;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = OSAL_NULL;
}
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_int_sp_dpc_alloc(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
		return rc;
	}

	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
		return rc;
	}

	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

	return rc;
}
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
	ecore_int_sp_dpc_setup(p_hwfn);
}
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info)
{
	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;

	if (!p_igu_info || !p_sb_cnt_info)
		return;

	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
		    sizeof(*p_sb_cnt_info));
}
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
	p_dev->attn_clr_en = clr_enable;
}
enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	enum _ecore_status_t rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return ECORE_INVAL;
	}

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
				 (u64)(osal_uintptr_t)&sb_entry,
				 CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64), 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
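
/* Example [a sketch]: coarsen the Rx-side coalescing timer of status
 * block 0 -
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, 0x1, 0, false);
 *
 * tx == false updates CAU_SB_ENTRY_TIMER_RES0 and tx == true updates
 * TIMER_RES1; the CAU entry is read and written back via DMAE.
 */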
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info)
{
	u16 sbid = p_sb->igu_sb_id;
	int i;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
		return ECORE_INVAL;

	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_CONSUMER_MEM + sbid * 4);

	for (i = 0; i < PIS_PER_SB_E4; i++)
		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					      CAU_REG_PI_MEMORY +
					      sbid * 4 * PIS_PER_SB_E4 + i * 4);

	return ECORE_SUCCESS;
}