2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
36 #include "ecore_spq.h"
38 #include "ecore_gtt_reg_addr.h"
39 #include "ecore_init_ops.h"
40 #include "ecore_rt_defs.h"
41 #include "ecore_int.h"
44 #include "ecore_sriov.h"
46 #include "ecore_hw_defs.h"
47 #include "ecore_hsi_common.h"
48 #include "ecore_mcp.h"
49 #include "ecore_dbg_fw_funcs.h"
52 /* This is nasty, but diag uses drv_dbg_fw_funcs.c [the non-ecore flavor],
53 * so those functions lack the ecore prefix.
54 * If other clients ever need this [or if the content there that isn't
55 * really optional grows], we'll need to re-think this.
57 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
58 struct ecore_ptt *ptt,
60 enum dbg_attn_type attn_type,
62 struct dbg_attn_block_result *results);
64 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
65 struct dbg_attn_block_result *results);
67 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
68 dbg_read_attn(hwfn, ptt, id, type, clear, results)
69 #define ecore_dbg_parse_attn(hwfn, results) \
70 dbg_parse_attn(hwfn, results)
73 struct ecore_pi_info {
74 ecore_int_comp_cb_t comp_cb;
75 void *cookie; /* Will be sent to the completion callback function */
78 struct ecore_sb_sp_info {
79 struct ecore_sb_info sb_info;
80 /* per protocol index data */
81 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
84 enum ecore_attention_type {
86 ECORE_ATTN_TYPE_PARITY,
89 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
90 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
92 struct aeu_invert_reg_bit {
95 #define ATTENTION_PARITY (1 << 0)
97 #define ATTENTION_LENGTH_MASK (0x00000ff0)
98 #define ATTENTION_LENGTH_SHIFT (4)
99 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
100 ATTENTION_LENGTH_SHIFT)
101 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
102 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
103 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
106 /* For multi-bit sources, the printed index starts at this offset */
107 #define ATTENTION_OFFSET_MASK (0x000ff000)
108 #define ATTENTION_OFFSET_SHIFT (12)
110 #define ATTENTION_BB_MASK (0x00700000)
111 #define ATTENTION_BB_SHIFT (20)
112 #define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
113 #define ATTENTION_BB_DIFFERENT (1 << 23)
115 #define ATTENTION_CLEAR_ENABLE (1 << 28)
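/* Worked example (editorial addition, illustrative only): the "SW timers #%d"
 * entry further below packs
 *	flags = (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT)
 *	      = 0x00001080,
 * so ATTENTION_LENGTH(flags) = (0x1080 & 0xff0) >> 4 = 8 consecutive AEU
 * bits, with printed indices starting at (0x1080 & 0xff000) >> 12 = 1.
 */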
118 /* Callback to call if the attention is triggered */
119 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
121 enum block_id block_index;
124 struct aeu_invert_reg {
125 struct aeu_invert_reg_bit bits[32];
128 #define MAX_ATTN_GRPS (8)
129 #define NUM_ATTN_REGS (9)
131 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
133 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
135 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
137 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
140 return ECORE_SUCCESS;
143 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
144 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
145 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
146 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
147 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
148 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
149 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
150 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
151 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
152 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
153 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
154 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
155 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
156 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
167 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
169 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID);
171 /* Disabled VF access */
172 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
175 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
176 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
177 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
178 PSWHST_REG_VF_DISABLED_ERROR_DATA);
179 DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
180 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
181 ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
182 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
183 ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
184 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
185 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
186 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
187 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
188 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
189 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
193 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
194 PSWHST_REG_INCORRECT_ACCESS_VALID);
195 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
196 u32 addr, data, length;
198 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
199 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
200 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
201 PSWHST_REG_INCORRECT_ACCESS_DATA);
202 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
203 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
205 DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
207 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
208 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
209 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
210 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
211 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
212 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
213 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
214 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
215 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
216 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
217 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
218 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
222 /* TODO - We know 'some' of these are legal due to virtualization,
223 * but is it true for all of them?
225 return ECORE_SUCCESS;
228 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
229 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
230 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
231 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
232 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
233 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
234 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
235 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
236 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
237 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
238 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
239 static const char *grc_timeout_attn_master_to_str(u8 master)
242 case 1: return "PXP";
243 case 2: return "MCP";
244 case 3: return "MSDM";
245 case 4: return "PSDM";
246 case 5: return "YSDM";
247 case 6: return "USDM";
248 case 7: return "TSDM";
249 case 8: return "XSDM";
250 case 9: return "DBU";
251 case 10: return "DMAE";
257 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
261 /* We've already cleared the timeout interrupt register, so we learn
262 * of interrupts via the validity register
264 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
265 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
266 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
269 /* Read the GRC timeout information */
270 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
271 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
272 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
273 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
275 DP_INFO(p_hwfn->p_dev,
276 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
278 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
279 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
280 grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
281 ECORE_GRC_ATTENTION_MASTER_SHIFT),
282 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
283 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
284 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
285 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
286 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
287 ECORE_GRC_ATTENTION_VF_SHIFT);
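/* Example (editorial addition, illustrative only): for tmp = 0x00800123 the
 * RDWR bit (1 << 23) is set, so the log reads "Write to" and the faulting
 * address is (0x123 << 2) = 0x48c - the register records dword addresses,
 * which the << 2 above converts to a byte address.
 */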
290 /* Regardless of anything else, clear the validity bit */
291 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
292 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
293 return ECORE_SUCCESS;
296 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
297 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
298 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
299 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
300 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
301 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
302 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
303 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
304 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
305 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
306 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
307 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
308 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
309 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
313 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
314 PGLUE_B_REG_TX_ERR_WR_DETAILS2);
315 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
316 u32 addr_lo, addr_hi, details;
318 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
319 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
320 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
321 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
322 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
323 PGLUE_B_REG_TX_ERR_WR_DETAILS);
325 DP_INFO(p_hwfn, "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
326 addr_hi, addr_lo, details,
327 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
328 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
329 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
331 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
332 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
333 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
336 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
337 PGLUE_B_REG_TX_ERR_RD_DETAILS2);
338 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
339 u32 addr_lo, addr_hi, details;
341 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
342 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
343 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
344 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
345 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
346 PGLUE_B_REG_TX_ERR_RD_DETAILS);
348 DP_INFO(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
349 addr_hi, addr_lo, details,
350 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
351 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
352 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
354 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
355 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
356 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
359 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
360 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
361 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
362 DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
364 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
365 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
366 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
367 u32 addr_hi, addr_lo;
369 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
370 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
371 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
372 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
374 DP_INFO(p_hwfn, "ICPL eror - %08x [Address %08x:%08x]\n",
375 tmp, addr_hi, addr_lo);
378 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
379 PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
380 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
381 u32 addr_hi, addr_lo, details;
383 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
384 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
385 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
386 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
387 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
388 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
390 DP_INFO(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
391 details, tmp, addr_hi, addr_lo);
394 /* Clear the indications */
395 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
396 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
398 return ECORE_SUCCESS;
401 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
403 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
405 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
410 static enum _ecore_status_t
411 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
413 DP_INFO(p_hwfn, "General attention 35!\n");
415 return ECORE_SUCCESS;
418 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
419 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
420 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
421 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
423 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
427 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
428 ECORE_DORQ_ATTENTION_REASON_MASK;
430 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
431 DORQ_REG_DB_DROP_DETAILS);
433 DP_INFO(p_hwfn->p_dev,
434 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
435 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
436 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
437 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
438 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
439 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
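/* Example (editorial addition, illustrative only): a details size field of
 * 0x2 is reported as 0x2 * 4 = 8 bytes, i.e. the hardware is assumed to
 * record the dropped doorbell size in units of 4 bytes, per the * 4
 * conversion above.
 */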
445 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
448 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
449 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
452 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
453 TM_REG_INT_STS_1_PEND_CONN_SCAN))
456 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
457 TM_REG_INT_STS_1_PEND_CONN_SCAN))
458 DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n");
459 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
460 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
461 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
462 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
464 return ECORE_SUCCESS;
471 /* Instead of major changes to the data-structure, we have some 'special'
472 * identifiers for sources whose meaning changed between adapters.
474 enum aeu_invert_reg_special_type {
475 AEU_INVERT_REG_SPECIAL_CNIG_0,
476 AEU_INVERT_REG_SPECIAL_CNIG_1,
477 AEU_INVERT_REG_SPECIAL_CNIG_2,
478 AEU_INVERT_REG_SPECIAL_CNIG_3,
479 AEU_INVERT_REG_SPECIAL_MAX,
482 static struct aeu_invert_reg_bit
483 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
484 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
485 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
486 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
487 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
490 /* Note that aeu_invert_reg must be defined with bits in the same order as in HW; */
491 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
494 { /* After Invert 1 */
495 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
500 { /* After Invert 2 */
501 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
502 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
503 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
504 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
505 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
506 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
507 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
508 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
509 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS},
514 { /* After Invert 3 */
515 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
520 { /* After Invert 4 */
521 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID},
522 {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
523 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID},
524 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
525 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS},
526 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
527 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS},
528 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
529 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM},
530 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
531 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM},
532 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
533 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
534 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
535 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
536 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
537 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
538 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
539 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
540 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
541 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
542 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
543 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
548 { /* After Invert 5 */
549 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
550 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
551 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
552 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
553 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
554 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
555 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
556 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
557 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
558 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
559 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
560 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
561 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
562 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
563 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
564 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
569 { /* After Invert 6 */
570 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
571 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
572 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
573 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
574 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
575 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
576 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
577 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
578 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
579 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
580 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
581 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
582 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
583 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
584 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
585 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
590 { /* After Invert 7 */
591 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
592 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
593 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
594 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
595 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
596 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
597 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
598 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
599 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
600 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
601 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
602 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
603 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
604 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
605 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
606 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
607 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
612 { /* After Invert 8 */
613 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
614 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
615 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
616 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
617 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
618 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
619 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
620 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
621 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
622 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
623 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
624 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
625 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
626 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
627 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
628 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
629 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
630 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
631 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
632 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
633 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
638 { /* After Invert 9 */
639 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
640 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
641 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
642 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
643 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
649 static struct aeu_invert_reg_bit *
650 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
651 struct aeu_invert_reg_bit *p_bit)
653 if (!ECORE_IS_BB(p_hwfn->p_dev))
656 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
659 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
663 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
664 struct aeu_invert_reg_bit *p_bit)
666 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
670 #define ATTN_STATE_BITS (0xfff)
671 #define ATTN_BITS_MASKABLE (0x3ff)
672 struct ecore_sb_attn_info {
673 /* Virtual & Physical address of the SB */
674 struct atten_status_block *sb_attn;
677 /* Last seen running index */
680 /* A mask of the AEU bits resulting in a parity error */
681 u32 parity_mask[NUM_ATTN_REGS];
683 /* A pointer to the attention description structure */
684 struct aeu_invert_reg *p_aeu_desc;
686 /* Previously asserted attentions that have not yet been deasserted */
689 /* Cleanup address for the link's general hw attention */
693 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
694 struct ecore_sb_attn_info *p_sb_desc)
698 OSAL_MMIOWB(p_hwfn->p_dev);
700 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
701 if (p_sb_desc->index != index) {
702 p_sb_desc->index = index;
703 rc = ECORE_SB_ATT_IDX;
706 OSAL_MMIOWB(p_hwfn->p_dev);
712 * @brief ecore_int_assertion - handles asserted attention bits
715 * @param asserted_bits newly asserted bits
716 * @return enum _ecore_status_t
718 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
721 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
724 /* Mask the source of the attention in the IGU */
725 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
726 IGU_REG_ATTENTION_ENABLE);
727 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
728 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
729 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
730 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
732 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
733 "inner known ATTN state: 0x%04x --> 0x%04x\n",
734 sb_attn_sw->known_attn,
735 sb_attn_sw->known_attn | asserted_bits);
736 sb_attn_sw->known_attn |= asserted_bits;
738 /* Handle MCP events */
739 if (asserted_bits & 0x100) {
740 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
741 /* Clean the MCP attention */
742 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
743 sb_attn_sw->mfw_attn_addr, 0);
746 /* FIXME - this will change once we have GOOD gtt definitions */
747 DIRECT_REG_WR(p_hwfn,
748 (u8 OSAL_IOMEM*)p_hwfn->regview +
749 GTT_BAR0_MAP_REG_IGU_CMD +
750 ((IGU_CMD_ATTN_BIT_SET_UPPER -
751 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
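/* Note (editorial addition): the << 3 above turns an IGU command index into
 * a BAR0 byte offset - each IGU command register is assumed to occupy 8
 * bytes in the GTT window, matching the igu_addr computations later in
 * this file.
 */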
753 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
756 return ECORE_SUCCESS;
759 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
760 enum block_id id, enum dbg_attn_type type,
763 struct dbg_attn_block_result attn_results;
764 enum dbg_status status;
766 OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));
768 status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
769 b_clear, &attn_results);
770 if (status != DBG_STATUS_OK)
771 DP_NOTICE(p_hwfn, true,
772 "Failed to parse attention information [status %d]\n",
776 ecore_dbg_parse_attn(p_hwfn, &attn_results);
778 ecore_dbg_print_attn(p_hwfn, &attn_results);
783 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
784 * cause of the attention
787 * @param p_aeu - descriptor of an AEU bit which caused the attention
788 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
789 * this bit to this group.
790 * @param bit_index - index of this bit in the aeu_en_reg
792 * @return enum _ecore_status_t
794 static enum _ecore_status_t
795 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
796 struct aeu_invert_reg_bit *p_aeu,
798 const char *p_bit_name,
801 enum _ecore_status_t rc = ECORE_INVAL;
802 bool b_fatal = false;
804 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
805 p_bit_name, bitmask);
807 /* Call callback before clearing the interrupt status */
809 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
811 rc = p_aeu->cb(p_hwfn);
814 if (rc != ECORE_SUCCESS)
817 /* Print HW block interrupt registers */
818 if (p_aeu->block_index != MAX_BLOCK_ID)
819 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
820 ATTN_TYPE_INTERRUPT, !b_fatal);
822 /* Reach assertion if attention is fatal */
824 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
827 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
830 /* Prevent this Attention from being asserted in the future */
831 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
832 p_hwfn->p_dev->attn_clr_en) {
835 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
836 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
837 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
845 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
848 * @param p_aeu - descriptor of an AEU bit which caused the parity
849 * @param aeu_en_reg - address of the AEU enable register
852 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
853 struct aeu_invert_reg_bit *p_aeu,
854 u32 aeu_en_reg, u8 bit_index)
856 u32 block_id = p_aeu->block_index, mask, val;
858 DP_NOTICE(p_hwfn->p_dev, false,
859 "%s parity attention is set [address 0x%08x, bit %d]\n",
860 p_aeu->bit_name, aeu_en_reg, bit_index);
862 if (block_id == MAX_BLOCK_ID)
865 ecore_int_attn_print(p_hwfn, block_id,
866 ATTN_TYPE_PARITY, false);
868 /* In A0, there's a single parity bit for several blocks */
869 if (block_id == BLOCK_BTB) {
870 ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
871 ATTN_TYPE_PARITY, false);
872 ecore_int_attn_print(p_hwfn, BLOCK_MCP,
873 ATTN_TYPE_PARITY, false);
876 /* Prevent this parity error from being re-asserted */
877 mask = ~(0x1 << bit_index);
878 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
879 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
880 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
885 * @brief - handles deassertion of previously asserted attentions.
888 * @param deasserted_bits - newly deasserted bits
889 * @return enum _ecore_status_t
892 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
895 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
896 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
898 enum _ecore_status_t rc = ECORE_SUCCESS;
900 /* Read the attention registers in the AEU */
901 for (i = 0; i < NUM_ATTN_REGS; i++) {
902 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
903 MISC_REG_AEU_AFTER_INVERT_1_IGU +
905 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
906 "Deasserted bits [%d]: %08x\n",
910 /* Handle parity attentions first */
911 for (i = 0; i < NUM_ATTN_REGS; i++)
913 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
916 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
917 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
918 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
920 /* Skip register in which no parity bit is currently set */
924 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
925 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
927 if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
928 !!(parities & (1 << bit_idx)))
929 ecore_int_deassertion_parity(p_hwfn, p_bit,
932 bit_idx += ATTENTION_LENGTH(p_bit->flags);
936 /* Find non-parity cause for attention and act */
937 for (k = 0; k < MAX_ATTN_GRPS; k++) {
938 struct aeu_invert_reg_bit *p_aeu;
940 /* Handle only groups whose attention is currently deasserted */
941 if (!(deasserted_bits & (1 << k)))
944 for (i = 0; i < NUM_ATTN_REGS; i++) {
947 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
949 k * sizeof(u32) * NUM_ATTN_REGS;
950 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
951 bits = aeu_inv_arr[i] & en;
953 /* Skip if no bit from this group is currently set */
957 /* Find all set bits from current register which belong
958 * to current group, making them responsible for the
959 * previous assertion.
961 for (j = 0, bit_idx = 0; bit_idx < 32; j++)
963 long unsigned int bitmask;
966 /* Need to account for bits with changed meaning */
967 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
968 p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);
971 bit_len = ATTENTION_LENGTH(p_aeu->flags);
972 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
978 /* Find the bits relating to HW-block, then
979 * shift so they'll become LSB.
981 bitmask = bits & (((1 << bit_len) - 1) << bit);
985 u32 flags = p_aeu->flags;
989 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
992 /* Some bits represent more than a
993 * single interrupt. Correctly print
996 if (ATTENTION_LENGTH(flags) > 2 ||
997 ((flags & ATTENTION_PAR_INT) &&
998 ATTENTION_LENGTH(flags) > 1))
999 OSAL_SNPRINTF(bit_name, 30,
1003 OSAL_STRNCPY(bit_name,
1007 /* We now need to pass bitmask in its
1012 /* Handle source of the attention */
1013 ecore_int_deassertion_aeu_bit(p_hwfn,
1020 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
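/* Worked example (editorial addition, illustrative only): with
 * bits = 0x0000000c and a 2-bit source at bit_idx = 2,
 * bitmask = 0xc & (((1 << 2) - 1) << 2) = 0xc; shifted down to the LSB it
 * becomes 0x3, so OSAL_FIND_FIRST_BIT() returns 0 for this source.
 */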
1025 /* Clear IGU indication for the deasserted bits */
1026 /* FIXME - this will change once we have GOOD gtt definitions */
1027 DIRECT_REG_WR(p_hwfn,
1028 (u8 OSAL_IOMEM*)p_hwfn->regview +
1029 GTT_BAR0_MAP_REG_IGU_CMD +
1030 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1031 IGU_CMD_INT_ACK_BASE) << 3),
1032 ~((u32)deasserted_bits));
1034 /* Unmask deasserted attentions in IGU */
1035 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1036 IGU_REG_ATTENTION_ENABLE);
1037 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1038 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1040 /* Clear deassertion from inner state */
1041 sb_attn_sw->known_attn &= ~deasserted_bits;
1046 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1048 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1049 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1050 u16 index = 0, asserted_bits, deasserted_bits;
1051 u32 attn_bits = 0, attn_acks = 0;
1052 enum _ecore_status_t rc = ECORE_SUCCESS;
1054 /* Read current attention bits/acks - safeguard against attentions
1055 * by guaranteeing work on a synchronized timeframe
1058 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1059 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1060 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1061 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1062 p_sb_attn->sb_index = index;
1064 /* Attention / Deassertion are meaningful (and in the correct state)
1065 * only when they differ and are consistent with the known state -
1066 * deassertion when there was a previous attention & there is a current
1067 * ack, and assertion when there is a current attention with no previous one
1069 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1070 ~p_sb_attn_sw->known_attn;
1071 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1072 p_sb_attn_sw->known_attn;
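/* Worked example (editorial addition, illustrative only): attn_bits = 0x101,
 * attn_acks = 0x001 and known_attn = 0x001 yield asserted_bits = 0x100
 * (a new MFW attention) and deasserted_bits = 0, matching the
 * special-cased 0x100 logs below.
 */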
1074 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1076 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1077 index, attn_bits, attn_acks, asserted_bits,
1078 deasserted_bits, p_sb_attn_sw->known_attn);
1079 else if (asserted_bits == 0x100)
1081 "MFW indication via attention\n");
1083 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1084 "MFW indication [deassertion]\n");
1086 if (asserted_bits) {
1087 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1092 if (deasserted_bits)
1093 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1098 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1099 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1101 struct igu_prod_cons_update igu_ack = { 0 };
1103 igu_ack.sb_id_and_flags =
1104 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1105 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1106 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1107 (IGU_SEG_ACCESS_ATTN <<
1108 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1110 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1112 /* Both segments (interrupts & acks) are written to the same address;
1113 * Need to guarantee all commands will be received (in-order) by HW.
1115 OSAL_MMIOWB(p_hwfn->p_dev);
1116 OSAL_BARRIER(p_hwfn->p_dev);
1119 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1121 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1122 struct ecore_pi_info *pi_info = OSAL_NULL;
1123 struct ecore_sb_attn_info *sb_attn;
1124 struct ecore_sb_info *sb_info;
1131 if (!p_hwfn->p_sp_sb) {
1132 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1136 sb_info = &p_hwfn->p_sp_sb->sb_info;
1137 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1139 DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
1143 if (!p_hwfn->p_sb_attn) {
1144 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1147 sb_attn = p_hwfn->p_sb_attn;
1149 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1150 p_hwfn, p_hwfn->my_id);
1152 /* Disable ack for the default status block. Required both for MSI-X and
1153 * for INTA in non-mask mode; for INTA it does no harm.
1155 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1157 /* Gather Interrupts/Attentions information */
1158 if (!sb_info->sb_virt) {
1159 DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1161 u32 tmp_index = sb_info->sb_ack;
1162 rc = ecore_sb_update_sb_idx(sb_info);
1163 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1164 "Interrupt indices: 0x%08x --> 0x%08x\n",
1165 tmp_index, sb_info->sb_ack);
1168 if (!sb_attn || !sb_attn->sb_attn) {
1169 DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
1171 u16 tmp_index = sb_attn->index;
1173 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1174 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1175 "Attention indices: 0x%08x --> 0x%08x\n",
1176 tmp_index, sb_attn->index);
1179 /* Check if we expect interrupts at this time. If not, just ack them */
1180 if (!(rc & ECORE_SB_EVENT_MASK)) {
1181 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1185 /* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
1186 if (!p_hwfn->p_dpc_ptt) {
1187 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1188 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1192 if (rc & ECORE_SB_ATT_IDX)
1193 ecore_int_attentions(p_hwfn);
1195 if (rc & ECORE_SB_IDX) {
1198 /* Since we only looked at the SB index, it's possible that more
1199 * than a single protocol-index on the SB has incremented.
1200 * Iterate over all configured protocol indices and check
1201 * whether something happened for each.
1203 for (pi = 0; pi < arr_size; pi++) {
1204 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1205 if (pi_info->comp_cb != OSAL_NULL)
1206 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1210 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1211 /* This should be done before the interrupts are enabled,
1212 * since otherwise a new attention will be generated.
1214 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1217 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1220 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1222 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1227 if (p_sb->sb_attn) {
1228 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1230 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1233 OSAL_FREE(p_hwfn->p_dev, p_sb);
1234 p_hwfn->p_sb_attn = OSAL_NULL;
1237 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1238 struct ecore_ptt *p_ptt)
1240 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1242 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1245 sb_info->known_attn = 0;
1247 /* Configure Attention Status Block in IGU */
1248 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1249 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1250 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1251 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1254 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1255 struct ecore_ptt *p_ptt,
1257 dma_addr_t sb_phy_addr)
1259 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1262 sb_info->sb_attn = sb_virt_addr;
1263 sb_info->sb_phys = sb_phy_addr;
1265 /* Set the pointer to the AEU descriptors */
1266 sb_info->p_aeu_desc = aeu_descs;
1268 /* Calculate Parity Masks */
1269 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1270 for (i = 0; i < NUM_ATTN_REGS; i++) {
1271 /* j is array index, k is bit index */
1272 for (j = 0, k = 0; k < 32; j++) {
1273 struct aeu_invert_reg_bit *p_aeu;
1275 p_aeu = &aeu_descs[i].bits[j];
1276 if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1277 sb_info->parity_mask[i] |= 1 << k;
1279 k += ATTENTION_LENGTH(p_aeu->flags);
1281 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1282 "Attn Mask [Reg %d]: 0x%08x\n",
1283 i, sb_info->parity_mask[i]);
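/* Example (editorial addition, illustrative only): an ATTENTION_PAR_INT
 * descriptor spans two AEU bits starting at k - parity first, then
 * interrupt - so only bit k is added to parity_mask[i] before k advances
 * by ATTENTION_LENGTH() == 2.
 */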
1286 /* Set the address of cleanup for the mcp attention */
1287 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1288 MISC_REG_AEU_GENERAL_ATTN_0;
1290 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1293 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1294 struct ecore_ptt *p_ptt)
1296 struct ecore_dev *p_dev = p_hwfn->p_dev;
1297 struct ecore_sb_attn_info *p_sb;
1298 dma_addr_t p_phys = 0;
1302 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1304 DP_NOTICE(p_dev, true, "Failed to allocate `struct ecore_sb_attn_info'\n");
1309 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1310 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1312 DP_NOTICE(p_dev, true, "Failed to allocate status block (attentions)\n");
1313 OSAL_FREE(p_dev, p_sb);
1317 /* Attention setup */
1318 p_hwfn->p_sb_attn = p_sb;
1319 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1321 return ECORE_SUCCESS;
1324 /* coalescing timeout = timeset << (timer_res + 1) */
1325 #define ECORE_CAU_DEF_RX_USECS 24
1326 #define ECORE_CAU_DEF_TX_USECS 48
1328 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1329 struct cau_sb_entry *p_sb_entry,
1330 u8 pf_id, u16 vf_number, u8 vf_valid)
1332 struct ecore_dev *p_dev = p_hwfn->p_dev;
1336 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1338 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1339 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1340 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1341 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1342 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1344 cau_state = CAU_HC_DISABLE_STATE;
1346 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1347 cau_state = CAU_HC_ENABLE_STATE;
1348 if (!p_dev->rx_coalesce_usecs)
1349 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1350 if (!p_dev->tx_coalesce_usecs)
1351 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1354 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1355 if (p_dev->rx_coalesce_usecs <= 0x7F)
1357 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1361 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1363 if (p_dev->tx_coalesce_usecs <= 0x7F)
1365 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1369 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1371 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1372 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
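/* Worked example (editorial addition, illustrative only): with the default
 * rx_coalesce_usecs = 24 (<= 0x7F) the smallest timer resolution is
 * selected; assuming timer_res = 0 in that case, the timeset programmed
 * elsewhere is 24 >> 0 = 24, for a coalescing timeout of
 * 24 << (0 + 1) = 48 time units per the formula above the
 * ECORE_CAU_DEF_* defines.
 */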
1375 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1376 struct ecore_ptt *p_ptt,
1377 u16 igu_sb_id, u32 pi_index,
1378 enum ecore_coalescing_fsm coalescing_fsm,
1381 struct cau_pi_entry pi_entry;
1382 u32 sb_offset, pi_offset;
1384 if (IS_VF(p_hwfn->p_dev))
1385 return;/* @@@TBD MichalK- VF CAU... */
1387 sb_offset = igu_sb_id * PIS_PER_SB;
1388 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1390 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1391 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1392 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1394 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1396 pi_offset = sb_offset + pi_index;
1397 if (p_hwfn->hw_init_done) {
1398 ecore_wr(p_hwfn, p_ptt,
1399 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1400 *((u32 *)&(pi_entry)));
1402 STORE_RT_REG(p_hwfn,
1403 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1404 *((u32 *)&(pi_entry)));
1408 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1409 struct ecore_ptt *p_ptt,
1410 struct ecore_sb_info *p_sb, u32 pi_index,
1411 enum ecore_coalescing_fsm coalescing_fsm,
1414 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1415 pi_index, coalescing_fsm, timeset);
1418 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1419 struct ecore_ptt *p_ptt,
1420 dma_addr_t sb_phys, u16 igu_sb_id,
1421 u16 vf_number, u8 vf_valid)
1423 struct cau_sb_entry sb_entry;
1425 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1426 vf_number, vf_valid);
1428 if (p_hwfn->hw_init_done) {
1429 /* Wide-bus, initialize via DMAE */
1430 u64 phys_addr = (u64)sb_phys;
1432 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
1433 CAU_REG_SB_ADDR_MEMORY +
1434 igu_sb_id * sizeof(u64), 2, 0);
1435 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
1436 CAU_REG_SB_VAR_MEMORY +
1437 igu_sb_id * sizeof(u64), 2, 0);
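/* Note (editorial addition): the DMAE length argument of 2 above is assumed
 * to be in dwords - one 8-byte CAU entry per SB, matching the sizeof(u64)
 * stride of CAU_REG_SB_ADDR_MEMORY / CAU_REG_SB_VAR_MEMORY.
 */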
1439 /* Initialize Status Block Address */
1440 STORE_RT_REG_AGG(p_hwfn,
1441 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2,
1444 STORE_RT_REG_AGG(p_hwfn,
1445 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2,
1449 /* Configure pi coalescing if set */
1450 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1451 /* eth will open queues for all tcs, so configure all of them
1452 * properly, rather than just the active ones
1454 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1456 u8 timeset, timer_res;
1459 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1460 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1462 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1466 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1467 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1468 ECORE_COAL_RX_STATE_MACHINE,
1471 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1473 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1477 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1478 for (i = 0; i < num_tc; i++) {
1479 _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1480 igu_sb_id, TX_PI(i),
1481 ECORE_COAL_TX_STATE_MACHINE,
1487 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1488 struct ecore_ptt *p_ptt,
1489 struct ecore_sb_info *sb_info)
1491 /* zero status block and ack counter */
1492 sb_info->sb_ack = 0;
1493 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1495 if (IS_PF(p_hwfn->p_dev))
1496 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1497 sb_info->igu_sb_id, 0, 0);
1500 struct ecore_igu_block *
1501 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1503 struct ecore_igu_block *p_block;
1506 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1508 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1510 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1511 !(p_block->status & ECORE_IGU_STATUS_FREE))
1514 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1522 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1525 struct ecore_igu_block *p_block;
1528 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1530 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1532 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1534 p_block->vector_number != vector_id)
1540 return ECORE_SB_INVALID_IDX;
1543 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1547 /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1548 if (sb_id == ECORE_SP_SB_ID)
1549 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1550 else if (IS_PF(p_hwfn->p_dev))
1551 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1553 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1555 if (igu_sb_id == ECORE_SB_INVALID_IDX)
1556 DP_NOTICE(p_hwfn, true,
1557 "Slowpath SB vector %04x doesn't exist\n",
1559 else if (sb_id == ECORE_SP_SB_ID)
1560 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1561 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1563 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1564 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1569 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1570 struct ecore_ptt *p_ptt,
1571 struct ecore_sb_info *sb_info,
1573 dma_addr_t sb_phy_addr,
1576 sb_info->sb_virt = sb_virt_addr;
1577 sb_info->sb_phys = sb_phy_addr;
1579 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1581 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1584 /* Let the igu info reference the client's SB info */
1585 if (sb_id != ECORE_SP_SB_ID) {
1586 if (IS_PF(p_hwfn->p_dev)) {
1587 struct ecore_igu_info *p_info;
1588 struct ecore_igu_block *p_block;
1590 p_info = p_hwfn->hw_info.p_igu_info;
1591 p_block = &p_info->entry[sb_info->igu_sb_id];
1593 p_block->sb_info = sb_info;
1594 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1595 p_info->usage.free_cnt--;
1597 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1601 #ifdef ECORE_CONFIG_DIRECT_HWFN
1602 sb_info->p_hwfn = p_hwfn;
1604 sb_info->p_dev = p_hwfn->p_dev;
1606 /* The igu address will hold the absolute address that needs to be
1607 * written to for a specific status block
1609 if (IS_PF(p_hwfn->p_dev)) {
1610 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview +
1611 GTT_BAR0_MAP_REG_IGU_CMD +
1612 (sb_info->igu_sb_id << 3);
1616 (u8 OSAL_IOMEM*)p_hwfn->regview +
1617 PXP_VF_BAR0_START_IGU +
1618 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1621 sb_info->flags |= ECORE_SB_INFO_INIT;
1623 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1625 return ECORE_SUCCESS;
1628 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1629 struct ecore_sb_info *sb_info,
1632 struct ecore_igu_info *p_info;
1633 struct ecore_igu_block *p_block;
1635 if (sb_info == OSAL_NULL)
1636 return ECORE_SUCCESS;
1638 /* zero status block and ack counter */
1639 sb_info->sb_ack = 0;
1640 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1642 if (IS_VF(p_hwfn->p_dev)) {
1643 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1644 return ECORE_SUCCESS;
1647 p_info = p_hwfn->hw_info.p_igu_info;
1648 p_block = &p_info->entry[sb_info->igu_sb_id];
1650 /* Vector 0 is reserved for the Default SB */
1651 if (p_block->vector_number == 0) {
1652 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1656 /* Lose reference to client's SB info, and fix counters */
1657 p_block->sb_info = OSAL_NULL;
1658 p_block->status |= ECORE_IGU_STATUS_FREE;
1659 p_info->usage.free_cnt++;
1661 return ECORE_SUCCESS;
1664 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1666 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1671 if (p_sb->sb_info.sb_virt) {
1672 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1673 p_sb->sb_info.sb_virt,
1674 p_sb->sb_info.sb_phys,
1675 SB_ALIGNED_SIZE(p_hwfn));
1678 OSAL_FREE(p_hwfn->p_dev, p_sb);
1679 p_hwfn->p_sp_sb = OSAL_NULL;
1682 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1683 struct ecore_ptt *p_ptt)
1685 struct ecore_sb_sp_info *p_sb;
1686 dma_addr_t p_phys = 0;
1690 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1692 DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sb_info'\n");
1697 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1699 SB_ALIGNED_SIZE(p_hwfn));
1701 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1702 OSAL_FREE(p_hwfn->p_dev, p_sb);
1707 /* Status Block setup */
1708 p_hwfn->p_sp_sb = p_sb;
1709 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1710 p_virt, p_phys, ECORE_SP_SB_ID);
1712 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1714 return ECORE_SUCCESS;
1717 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1718 ecore_int_comp_cb_t comp_cb,
1723 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1724 enum _ecore_status_t rc = ECORE_NOMEM;
1727 /* Look for a free index */
1728 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1729 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1732 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1733 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1735 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1743 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
1746 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1748 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1751 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1752 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1754 return ECORE_SUCCESS;
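/* Usage sketch (editorial addition; my_cb/my_cookie are hypothetical, and
 * the elided register_cb parameters are assumed to be the cookie plus
 * out-pointers for the PI index and fw consumer, as in the qed flavor of
 * this code):
 *
 *	__le16 *p_fw_cons;
 *	u8 pi_idx;
 *
 *	if (ecore_int_register_cb(p_hwfn, my_cb, my_cookie,
 *				  &pi_idx, &p_fw_cons) == ECORE_SUCCESS) {
 *		... my_cb(p_hwfn, my_cookie) runs from the slowpath DPC ...
 *		ecore_int_unregister_cb(p_hwfn, pi_idx);
 *	}
 */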
1757 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1759 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1762 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1763 struct ecore_ptt *p_ptt,
1764 enum ecore_int_mode int_mode)
1766 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1769 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1770 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1771 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1775 p_hwfn->p_dev->int_mode = int_mode;
1776 switch (p_hwfn->p_dev->int_mode) {
1777 case ECORE_INT_MODE_INTA:
1778 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1779 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1782 case ECORE_INT_MODE_MSI:
1783 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1784 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1787 case ECORE_INT_MODE_MSIX:
1788 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1790 case ECORE_INT_MODE_POLL:
1794 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1797 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1798 struct ecore_ptt *p_ptt)
1801 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1802 DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
1807 /* Configure AEU signal change to produce attentions */
1808 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1809 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1810 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1811 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1813 /* Flush the writes to IGU */
1814 OSAL_MMIOWB(p_hwfn->p_dev);
1816 /* Unmask AEU signals toward IGU */
1817 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1820 enum _ecore_status_t
1821 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1822 enum ecore_int_mode int_mode)
1824 enum _ecore_status_t rc = ECORE_SUCCESS;
1827 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
1828 * attentions. While we're waiting for the BRCM answer regarding this
1829 * attention, we simply mask it in the meanwhile.
1831 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1833 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1835 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1837 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1838 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1839 if (rc != ECORE_SUCCESS) {
1840 DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
1841 return ECORE_NORESOURCES;
1843 p_hwfn->b_int_requested = true;
1846 /* Enable interrupt Generation */
1847 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1849 p_hwfn->b_int_enabled = 1;
1854 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1855 struct ecore_ptt *p_ptt)
1857 p_hwfn->b_int_enabled = 0;
1859 if (IS_VF(p_hwfn->p_dev))
1862 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1865 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1866 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1867 struct ecore_ptt *p_ptt,
1872 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1873 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1874 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1875 u8 type = 0; /* FIXME MichalS type??? */
1877 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1878 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1880 /* Use the Control Command Register to perform the cleanup. There is an
1881 * option to do this using the IGU BAR, but then it can't be used for VFs.
	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);
	/* Calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct ecore_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = ecore_rd(p_hwfn, p_ptt,
			       IGU_REG_WRITE_DONE_PENDING +
			       ((igu_sb_id / 32) * 4));
		if (val & (1 << (igu_sb_id % 32)))
			OSAL_UDELAY(10);
		else
			break;
	}

	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn, true,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}
int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs as later the driver wouldn't be
		 * able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW provided; note that the MFW
		 * accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know the total number of SBs in
			 * the IGU and how many of them are PF SBs, so we can
			 * validate that enough remain for the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;
	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}
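/*
 * Illustrative note (editor's sketch, not compiled code): an IGU CAM line
 * packs owner and vector into a single u32, so a line for, say, PF-relative
 * function 1 with vector 3 would be composed exactly as the loop above
 * does it:
 *
 *	u32 line = 0;
 *
 *	SET_FIELD(line, IGU_MAPPING_LINE_FUNCTION_NUMBER, 1);
 *	SET_FIELD(line, IGU_MAPPING_LINE_PF_VALID, 1);
 *	SET_FIELD(line, IGU_MAPPING_LINE_VECTOR_NUMBER, 3);
 *	SET_FIELD(line, IGU_MAPPING_LINE_VALID, 1);
 */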
int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * the reset expects the non-orig fields to reflect the initial status
	 * of the SBs, and will re-calculate the originals based on those.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;

	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}
static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val,
					 IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val,
					   IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit the number
		 * of prints by having each PF print only its own entries,
		 * with the exception of PF0 which prints everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}

	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block = OSAL_NULL;
	u16 igu_sb_id = 0, vf_num = 0;
	u32 val = 0;

	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
		return ECORE_INVAL;

	if (sb_id == ECORE_SP_SB_ID)
		return ECORE_INVAL;

	if (!p_info->b_allow_pf_vf_change) {
		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
		return ECORE_INVAL;
	}

	/* If we're moving a SB from PF to VF, the client had to specify
	 * which vector it wants to move.
	 */
	if (b_to_vf) {
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
		if (igu_sb_id == ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* If we're moving a SB from VF to PF, need to validate there isn't
	 * already a line configured for that vector.
	 */
	if (!b_to_vf) {
		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
		    ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* We need to validate that the SB can actually be relocated.
	 * This would also handle the previous case where we've explicitly
	 * stated which IGU SB needs to move.
	 */
	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
			if (b_to_vf)
				return ECORE_INVAL;
			else
				continue;
		}

		break;
	}

	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
			   "Failed to find a free SB to move\n");
		return ECORE_INVAL;
	}

	/* At this point, p_block points to the SB we want to relocate */
	if (b_to_vf) {
		p_block->status &= ~ECORE_IGU_STATUS_PF;

		/* It doesn't matter which VF number we choose, since we're
		 * going to disable the line; but let's keep it in range.
		 */
		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

		p_block->function_id = (u8)vf_num;
		p_block->is_pf = 0;
		p_block->vector_number = 0;

		p_info->usage.cnt--;
		p_info->usage.free_cnt--;
		p_info->usage.iov_cnt++;
		p_info->usage.free_cnt_iov++;

		/* TODO - if SBs aren't really the limiting factor,
		 * then this might not be accurate [in the sense that
		 * we might not need to decrement the feature].
		 */
		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
	} else {
		p_block->status |= ECORE_IGU_STATUS_PF;
		p_block->function_id = p_hwfn->rel_pf_id;
		p_block->is_pf = 1;
		p_block->vector_number = sb_id + 1;

		p_info->usage.cnt++;
		p_info->usage.free_cnt++;
		p_info->usage.iov_cnt--;
		p_info->usage.free_cnt_iov--;

		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
	}

	/* Update the IGU and CAU with the new configuration */
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
		  p_block->function_id);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
		  p_block->vector_number);

	ecore_wr(p_hwfn, p_ptt,
		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
		 val);

	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
			      igu_sb_id, vf_num,
			      p_block->is_pf ? 0 : 1);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
		   igu_sb_id, p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	return ECORE_SUCCESS;
}
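/*
 * Usage sketch (illustrative): donating the PF's first non-default vector
 * to the VF pool and later reclaiming it. Note that sb_id is the
 * PF-relative vector number, not the IGU CAM index:
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 0, true);
 *	...
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 0, false);
 */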
/** @brief Initialize IGU runtime registers. */
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
{
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				LSB_IGU_CMD_ADDR * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				MSB_IGU_CMD_ADDR * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
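/*
 * Illustrative note (editor's sketch, not compiled code): because the two
 * 32-bit halves are folded into one u64, a caller can test a given
 * interrupt line with a plain bit mask; vector_idx here is hypothetical:
 *
 *	u64 sisr = ecore_int_igu_read_sisr_reg(p_hwfn);
 *
 *	if (sisr & (1ULL << vector_idx))
 *		... service that vector ...
 */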
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
	if (!p_hwfn->sp_dpc)
		return ECORE_NOMEM;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = OSAL_NULL;
}
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_int_sp_dpc_alloc(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
		return rc;
	}

	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
		return rc;
	}

	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

	return rc;
}
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}

void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
	ecore_int_sp_dpc_setup(p_hwfn);
}

void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info)
{
	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;

	if (!p_igu_info || !p_sb_cnt_info)
		return;

	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
		    sizeof(*p_sb_cnt_info));
}

void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}

void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
	p_dev->attn_clr_en = clr_enable;
}
enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	enum _ecore_status_t rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return ECORE_INVAL;
	}

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
				 (u64)(osal_uintptr_t)&sb_entry,
				 CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64), 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
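/*
 * Usage sketch (illustrative; the resolution value is an example only):
 * coalescing timer resolution is tuned per SB and per direction by
 * rewriting the CAU entry through DMAE, e.g. for the Tx side of SB 0:
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, 0x2, 0, true);
 */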
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info)
{
	u16 sbid = p_sb->igu_sb_id;
	int i;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
		return ECORE_INVAL;

	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_CONSUMER_MEM + sbid * 4);

	for (i = 0; i < PIS_PER_SB; i++)
		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					      CAU_REG_PI_MEMORY +
					      sbid * 4 * PIS_PER_SB + i * 4);

	return ECORE_SUCCESS;
}
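/*
 * Usage sketch (illustrative): comparing the IGU producer and consumer
 * values is a quick way to tell whether a status block still has
 * unserviced events:
 *
 *	struct ecore_sb_info_dbg dbg;
 *
 *	if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, p_sb, &dbg) == ECORE_SUCCESS &&
 *	    dbg.igu_prod != dbg.igu_cons)
 *		... the SB has pending work ...
 */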