/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES  (50 * 1000)  /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
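/* Usage sketch (illustrative only): these accessors hide the per-PF driver
 * mailbox address behind mcp_info->drv_mb_addr, so a raw exchange with the
 * MFW boils down to something like:
 *
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, cmd | seq_num);
 *	resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 *
 * The _field arguments are member names of struct public_drv_mb from
 * mcp_public.h. Real callers should go through ecore_mcp_cmd_and_union()
 * below rather than using the macros directly.
 */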
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17
79 static int loaded_port[MAX_NUM_PORTS] = { 0 };
82 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
84 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
89 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
90 struct ecore_ptt *p_ptt)
92 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
94 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
96 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
98 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
99 "port_addr = 0x%x, port_id 0x%02x\n",
100 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
103 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
104 struct ecore_ptt *p_ptt)
106 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
111 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
115 if (!p_hwfn->mcp_info->public_base)
118 for (i = 0; i < length; i++) {
119 tmp = ecore_rd(p_hwfn, p_ptt,
120 p_hwfn->mcp_info->mfw_mb_addr +
121 (i << 2) + sizeof(u32));
123 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
124 OSAL_BE32_TO_CPU(tmp);
128 struct ecore_mcp_cmd_elem {
129 osal_list_entry_t list;
130 struct ecore_mcp_mb_params *p_mb_params;
131 u16 expected_seq_num;
135 /* Must be called while cmd_lock is acquired */
136 static struct ecore_mcp_cmd_elem *
137 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
138 struct ecore_mcp_mb_params *p_mb_params,
139 u16 expected_seq_num)
141 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
143 p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
144 sizeof(*p_cmd_elem));
146 DP_NOTICE(p_hwfn, false,
147 "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
151 p_cmd_elem->p_mb_params = p_mb_params;
152 p_cmd_elem->expected_seq_num = expected_seq_num;
153 OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
158 /* Must be called while cmd_lock is acquired */
159 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
160 struct ecore_mcp_cmd_elem *p_cmd_elem)
162 OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
163 OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
166 /* Must be called while cmd_lock is acquired */
167 static struct ecore_mcp_cmd_elem *
168 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
170 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
172 OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
173 struct ecore_mcp_cmd_elem) {
174 if (p_cmd_elem->expected_seq_num == seq_num)
181 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
183 if (p_hwfn->mcp_info) {
184 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
186 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
187 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
189 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
190 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
191 &p_hwfn->mcp_info->cmd_list, list,
192 struct ecore_mcp_cmd_elem) {
193 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
195 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
197 #ifdef CONFIG_ECORE_LOCK_ALLOC
198 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
199 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
203 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
204 p_hwfn->mcp_info = OSAL_NULL;
206 return ECORE_SUCCESS;
209 enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
210 struct ecore_ptt *p_ptt)
212 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
213 u32 drv_mb_offsize, mfw_mb_offsize;
214 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
217 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
218 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
219 p_info->public_base = 0;
224 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
225 if (!p_info->public_base)
228 p_info->public_base |= GRCBASE_MCP;
230 /* Calculate the driver and MFW mailbox address */
231 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
232 SECTION_OFFSIZE_ADDR(p_info->public_base,
234 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
235 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
236 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
237 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
239 /* Set the MFW MB address */
240 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
241 SECTION_OFFSIZE_ADDR(p_info->public_base,
243 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
244 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
245 p_info->mfw_mb_addr);
	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
250 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
251 DRV_MSG_SEQ_NUMBER_MASK;
	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;
257 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
259 return ECORE_SUCCESS;
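/* Note on the addressing scheme used above (and throughout this file): each
 * public shmem section is described by an "offsize" word. SECTION_OFFSIZE_ADDR()
 * yields the address of that word for a given section, and SECTION_ADDR()
 * decodes the offsize into the address of the per-index (PF/port/path)
 * instance of the section. The drv/mfw mailboxes and the port, path and func
 * sections used below are all located this way.
 */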
262 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
263 struct ecore_ptt *p_ptt)
265 struct ecore_mcp_info *p_info;
268 /* Allocate mcp_info structure */
269 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
270 sizeof(*p_hwfn->mcp_info));
271 if (!p_hwfn->mcp_info)
273 p_info = p_hwfn->mcp_info;
275 /* Initialize the MFW spinlocks */
276 #ifdef CONFIG_ECORE_LOCK_ALLOC
277 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
278 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
280 OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
281 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
283 OSAL_LIST_INIT(&p_info->cmd_list);
285 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
286 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
290 return ECORE_SUCCESS;
293 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
294 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
295 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
299 return ECORE_SUCCESS;
302 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
303 ecore_mcp_free(p_hwfn);
307 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
308 struct ecore_ptt *p_ptt)
310 u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
315 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
316 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
317 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
318 p_hwfn->mcp_info->mcp_hist, generic_por_0);
320 ecore_load_mcp_offsets(p_hwfn, p_ptt);
321 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
325 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
326 struct ecore_ptt *p_ptt)
328 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
329 enum _ecore_status_t rc = ECORE_SUCCESS;
332 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
333 delay = EMUL_MCP_RESP_ITER_US;
336 if (p_hwfn->mcp_info->b_block_cmd) {
337 DP_NOTICE(p_hwfn, false,
338 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
339 return ECORE_ABORTED;
342 /* Ensure that only a single thread is accessing the mailbox */
343 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
345 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
347 /* Set drv command along with the updated sequence */
348 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
349 seq = ++p_hwfn->mcp_info->drv_mb_seq;
350 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
356 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
357 MISCS_REG_GENERIC_POR_0)) &&
358 (cnt++ < ECORE_MCP_RESET_RETRIES));
360 if (org_mcp_reset_seq !=
361 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
362 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
363 "MCP was reset after %d usec\n", cnt * delay);
365 DP_ERR(p_hwfn, "Failed to reset MCP\n");
369 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
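/* Timing note: the polling loop above uses CHIP_MCP_RESP_ITER_US (10 usec)
 * per iteration for up to ECORE_MCP_RESET_RETRIES (50 * 1000) iterations,
 * i.e. 10 usec * 50,000 = 500,000 usec = 500 msec in total, which is where
 * the "Account for 500 msec" comment on the definition comes from. On
 * emulation, EMUL_MCP_RESP_ITER_US is used as the per-iteration delay
 * instead.
 */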
374 /* Must be called while cmd_lock is acquired */
375 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
377 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
379 /* There is at most one pending command at a certain time, and if it
380 * exists - it is placed at the HEAD of the list.
382 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
383 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
384 struct ecore_mcp_cmd_elem,
386 return !p_cmd_elem->b_is_completed;
392 /* Must be called while cmd_lock is acquired */
393 static enum _ecore_status_t
394 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
396 struct ecore_mcp_mb_params *p_mb_params;
397 struct ecore_mcp_cmd_elem *p_cmd_elem;
401 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
402 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
404 /* Return if no new non-handled response has been received */
405 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
408 p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
411 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
413 return ECORE_UNKNOWN_ERROR;
416 p_mb_params = p_cmd_elem->p_mb_params;
418 /* Get the MFW response along with the sequence number */
419 p_mb_params->mcp_resp = mcp_resp;
421 /* Get the MFW param */
422 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
424 /* Get the union data */
425 if (p_mb_params->p_data_dst != OSAL_NULL &&
426 p_mb_params->data_dst_size) {
427 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
430 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
431 union_data_addr, p_mb_params->data_dst_size);
434 p_cmd_elem->b_is_completed = true;
436 return ECORE_SUCCESS;
439 /* Must be called while cmd_lock is acquired */
440 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
441 struct ecore_ptt *p_ptt,
442 struct ecore_mcp_mb_params *p_mb_params,
445 union drv_union_data union_data;
448 /* Set the union data */
449 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
450 OFFSETOF(struct public_drv_mb, union_data);
451 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
452 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
453 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
454 p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));
458 /* Set the drv param */
459 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
461 /* Set the drv command along with the sequence number */
462 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
464 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
465 "MFW mailbox: command 0x%08x param 0x%08x\n",
466 (p_mb_params->cmd | seq_num), p_mb_params->param);
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
472 p_hwfn->mcp_info->b_block_cmd = block_cmd;
474 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
475 block_cmd ? "Block" : "Unblock");
478 static void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
479 struct ecore_ptt *p_ptt)
481 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
483 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
484 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
485 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
486 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
487 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
488 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
489 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
491 DP_NOTICE(p_hwfn, false,
492 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
493 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
496 static enum _ecore_status_t
497 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
498 struct ecore_mcp_mb_params *p_mb_params,
499 u32 max_retries, u32 delay)
501 struct ecore_mcp_cmd_elem *p_cmd_elem;
504 enum _ecore_status_t rc = ECORE_SUCCESS;
506 /* Wait until the mailbox is non-occupied */
508 /* Exit the loop if there is no pending command, or if the
509 * pending command is completed during this iteration.
510 * The spinlock stays locked until the command is sent.
513 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
515 if (!ecore_mcp_has_pending_cmd(p_hwfn))
518 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
519 if (rc == ECORE_SUCCESS)
521 else if (rc != ECORE_AGAIN)
524 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
526 } while (++cnt < max_retries);
528 if (cnt >= max_retries) {
529 DP_NOTICE(p_hwfn, false,
530 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
531 p_mb_params->cmd, p_mb_params->param);
535 /* Send the mailbox command */
536 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
537 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
538 p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
542 __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
543 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
545 /* Wait for the MFW response */
547 /* Exit the loop if the command is already completed, or if the
548 * command is completed during this iteration.
549 * The spinlock stays locked until the list element is removed.
553 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
555 if (p_cmd_elem->b_is_completed)
558 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
559 if (rc == ECORE_SUCCESS)
561 else if (rc != ECORE_AGAIN)
564 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
565 } while (++cnt < max_retries);
567 if (cnt >= max_retries) {
568 DP_NOTICE(p_hwfn, false,
569 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
570 p_mb_params->cmd, p_mb_params->param);
571 ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
573 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
574 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
575 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
577 ecore_mcp_cmd_set_blocking(p_hwfn, true);
578 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
582 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
583 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
585 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
586 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
587 p_mb_params->mcp_resp, p_mb_params->mcp_param,
588 (cnt * delay) / 1000, (cnt * delay) % 1000);
590 /* Clear the sequence number from the MFW response */
591 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
593 return ECORE_SUCCESS;
596 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
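/* Usage sketch (illustrative): callers of the mailbox path fill a
 * struct ecore_mcp_mb_params on the stack and let the code above handle
 * sequencing, retries and the union_data copies, e.g.:
 *
 *	struct ecore_mcp_mb_params mb_params;
 *
 *	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
 *	mb_params.cmd = DRV_MSG_CODE_...;
 *	mb_params.param = param;
 *	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 *
 * On ECORE_SUCCESS, mb_params.mcp_resp and mb_params.mcp_param hold the MFW
 * reply; ecore_mcp_cmd() below is exactly this pattern for commands that
 * carry no union data.
 */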
600 static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
601 struct ecore_ptt *p_ptt,
602 struct ecore_mcp_mb_params *p_mb_params)
604 osal_size_t union_data_size = sizeof(union drv_union_data);
605 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
606 u32 delay = CHIP_MCP_RESP_ITER_US;
609 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
610 delay = EMUL_MCP_RESP_ITER_US;
611 /* There is a built-in delay of 100usec in each MFW response read */
612 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
616 /* MCP not initialized */
617 if (!ecore_mcp_is_init(p_hwfn)) {
618 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
622 if (p_mb_params->data_src_size > union_data_size ||
623 p_mb_params->data_dst_size > union_data_size) {
625 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
626 p_mb_params->data_src_size, p_mb_params->data_dst_size,
631 if (p_hwfn->mcp_info->b_block_cmd) {
632 DP_NOTICE(p_hwfn, false,
633 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
634 p_mb_params->cmd, p_mb_params->param);
635 return ECORE_ABORTED;
638 return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
642 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
643 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
644 u32 *o_mcp_resp, u32 *o_mcp_param)
646 struct ecore_mcp_mb_params mb_params;
647 enum _ecore_status_t rc;
650 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
651 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
653 loaded_port[p_hwfn->port_id]--;
654 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
657 return ECORE_SUCCESS;
661 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
663 mb_params.param = param;
664 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
665 if (rc != ECORE_SUCCESS)
668 *o_mcp_resp = mb_params.mcp_resp;
669 *o_mcp_param = mb_params.mcp_param;
671 return ECORE_SUCCESS;
674 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
675 struct ecore_ptt *p_ptt,
683 struct ecore_mcp_mb_params mb_params;
684 enum _ecore_status_t rc;
686 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
688 mb_params.param = param;
689 mb_params.p_data_src = i_buf;
690 mb_params.data_src_size = (u8) i_txn_size;
691 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
692 if (rc != ECORE_SUCCESS)
695 *o_mcp_resp = mb_params.mcp_resp;
696 *o_mcp_param = mb_params.mcp_param;
698 return ECORE_SUCCESS;
701 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
702 struct ecore_ptt *p_ptt,
710 struct ecore_mcp_mb_params mb_params;
711 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
712 enum _ecore_status_t rc;
714 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
716 mb_params.param = param;
717 mb_params.p_data_dst = raw_data;
719 /* Use the maximal value since the actual one is part of the response */
720 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
722 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
723 if (rc != ECORE_SUCCESS)
726 *o_mcp_resp = mb_params.mcp_resp;
727 *o_mcp_param = mb_params.mcp_param;
729 *o_txn_size = *o_mcp_param;
730 OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);
732 return ECORE_SUCCESS;
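/* Caller note: o_buf must be able to hold the returned *o_txn_size bytes,
 * which is not expected to exceed MCP_DRV_NVM_BUF_LEN for a single
 * transaction; the actual size is only known from mcp_param after the
 * mailbox completes, which is why the full-size raw_data staging buffer is
 * used above.
 */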
736 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
739 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
742 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
743 } else if (!loaded_port[p_hwfn->port_id]) {
744 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
746 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	/* On CMT, always report that it's an engine load */
750 if (p_hwfn->p_dev->num_hwfns > 1)
751 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
753 *p_load_code = load_phase;
755 loaded_port[p_hwfn->port_id]++;
757 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
758 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
759 *p_load_code, loaded, p_hwfn->port_id,
760 loaded_port[p_hwfn->port_id]);
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
768 bool can_force_load = false;
770 switch (override_force_load) {
771 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
772 can_force_load = true;
774 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
775 can_force_load = false;
778 can_force_load = (drv_role == DRV_ROLE_OS &&
779 exist_drv_role == DRV_ROLE_PREBOOT) ||
780 (drv_role == DRV_ROLE_KDUMP &&
781 exist_drv_role == DRV_ROLE_OS);
785 return can_force_load;
788 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
789 struct ecore_ptt *p_ptt)
791 u32 resp = 0, param = 0;
792 enum _ecore_status_t rc;
794 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
796 if (rc != ECORE_SUCCESS)
797 DP_NOTICE(p_hwfn, false,
798 "Failed to send cancel load request, rc = %d\n", rc);
803 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
804 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
805 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
806 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
807 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
808 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
809 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
811 static u32 ecore_get_config_bitmap(void)
813 u32 config_bitmap = 0x0;
815 #ifdef CONFIG_ECORE_L2
816 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
818 #ifdef CONFIG_ECORE_SRIOV
819 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
821 #ifdef CONFIG_ECORE_ROCE
822 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
824 #ifdef CONFIG_ECORE_IWARP
825 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
827 #ifdef CONFIG_ECORE_FCOE
828 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
830 #ifdef CONFIG_ECORE_ISCSI
831 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
833 #ifdef CONFIG_ECORE_LL2
834 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
837 return config_bitmap;
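/* The bitmap built above is reported to the MFW as part of the load request:
 * ecore_mcp_load_req() below places it in drv_ver_1, so the MFW can tell
 * which optional ecore features this driver was compiled with.
 */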
840 struct ecore_load_req_in_params {
842 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
843 #define ECORE_LOAD_REQ_HSI_VER_1 1
850 bool avoid_eng_reset;
853 struct ecore_load_req_out_params {
863 static enum _ecore_status_t
864 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
865 struct ecore_load_req_in_params *p_in_params,
866 struct ecore_load_req_out_params *p_out_params)
868 struct ecore_mcp_mb_params mb_params;
869 struct load_req_stc load_req;
870 struct load_rsp_stc load_rsp;
872 enum _ecore_status_t rc;
874 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
875 load_req.drv_ver_0 = p_in_params->drv_ver_0;
876 load_req.drv_ver_1 = p_in_params->drv_ver_1;
877 load_req.fw_ver = p_in_params->fw_ver;
878 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
879 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
880 p_in_params->timeout_val);
881 SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE, p_in_params->force_cmd);
882 SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0,
883 p_in_params->avoid_eng_reset);
885 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
886 DRV_ID_MCP_HSI_VER_CURRENT :
887 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
889 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
890 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
891 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
892 mb_params.p_data_src = &load_req;
893 mb_params.data_src_size = sizeof(load_req);
894 mb_params.p_data_dst = &load_rsp;
895 mb_params.data_dst_size = sizeof(load_rsp);
897 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
900 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
901 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
902 GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
903 GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
905 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
906 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
907 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
908 load_req.drv_ver_0, load_req.drv_ver_1,
909 load_req.fw_ver, load_req.misc0,
910 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
911 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
912 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
913 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
915 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
916 if (rc != ECORE_SUCCESS) {
917 DP_NOTICE(p_hwfn, false,
918 "Failed to send load request, rc = %d\n", rc);
922 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
923 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
924 p_out_params->load_code = mb_params.mcp_resp;
926 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
927 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
928 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
929 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
930 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
931 load_rsp.fw_ver, load_rsp.misc0,
932 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
933 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
934 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
936 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
937 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
938 p_out_params->exist_fw_ver = load_rsp.fw_ver;
939 p_out_params->exist_drv_role =
940 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
941 p_out_params->mfw_hsi_ver =
942 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
943 p_out_params->drv_exists =
944 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
945 LOAD_RSP_FLAGS0_DRV_EXISTS;
948 return ECORE_SUCCESS;
951 static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
955 case ECORE_DRV_ROLE_OS:
956 *p_mfw_drv_role = DRV_ROLE_OS;
958 case ECORE_DRV_ROLE_KDUMP:
959 *p_mfw_drv_role = DRV_ROLE_KDUMP;
964 enum ecore_load_req_force {
965 ECORE_LOAD_REQ_FORCE_NONE,
966 ECORE_LOAD_REQ_FORCE_PF,
967 ECORE_LOAD_REQ_FORCE_ALL,
970 static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
974 case ECORE_LOAD_REQ_FORCE_NONE:
975 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
977 case ECORE_LOAD_REQ_FORCE_PF:
978 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
980 case ECORE_LOAD_REQ_FORCE_ALL:
981 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
986 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
987 struct ecore_ptt *p_ptt,
988 struct ecore_load_req_params *p_params)
990 struct ecore_load_req_out_params out_params;
991 struct ecore_load_req_in_params in_params;
992 u8 mfw_drv_role = 0, mfw_force_cmd;
993 enum _ecore_status_t rc;
996 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
997 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
998 return ECORE_SUCCESS;
1002 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
1003 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
1004 in_params.drv_ver_0 = ECORE_VERSION;
1005 in_params.drv_ver_1 = ecore_get_config_bitmap();
1006 in_params.fw_ver = STORM_FW_VERSION;
1007 ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
1008 in_params.drv_role = mfw_drv_role;
1009 in_params.timeout_val = p_params->timeout_val;
1010 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
1011 in_params.force_cmd = mfw_force_cmd;
1012 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
1014 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1015 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1016 if (rc != ECORE_SUCCESS)
1019 /* First handle cases where another load request should/might be sent:
1020 * - MFW expects the old interface [HSI version = 1]
1021 * - MFW responds that a force load request is required
1023 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
1025 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
1027 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
1028 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1029 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1031 if (rc != ECORE_SUCCESS)
1033 } else if (out_params.load_code ==
1034 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1035 if (ecore_mcp_can_force_load(in_params.drv_role,
1036 out_params.exist_drv_role,
1037 p_params->override_force_load)) {
1039 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
1040 in_params.drv_role, in_params.fw_ver,
1041 in_params.drv_ver_1, in_params.drv_ver_0,
1042 out_params.exist_drv_role,
1043 out_params.exist_fw_ver,
1044 out_params.exist_drv_ver_1,
1045 out_params.exist_drv_ver_0);
1047 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
1050 in_params.force_cmd = mfw_force_cmd;
1051 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1052 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1054 if (rc != ECORE_SUCCESS)
1057 DP_NOTICE(p_hwfn, false,
1058 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1059 in_params.drv_role, in_params.fw_ver,
1060 in_params.drv_ver_0, in_params.drv_ver_1,
1061 out_params.exist_drv_role,
1062 out_params.exist_fw_ver,
1063 out_params.exist_drv_ver_0,
1064 out_params.exist_drv_ver_1);
1066 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1071 /* Now handle the other types of responses.
1072 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1073 * expected here after the additional revised load requests were sent.
1075 switch (out_params.load_code) {
1076 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1077 case FW_MSG_CODE_DRV_LOAD_PORT:
1078 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1079 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1080 out_params.drv_exists) {
1081 /* The role and fw/driver version match, but the PF is
1082 * already loaded and has not been unloaded gracefully.
1083 * This is unexpected since a quasi-FLR request was
1084 * previously sent as part of ecore_hw_prepare().
1086 DP_NOTICE(p_hwfn, false,
1087 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1092 DP_NOTICE(p_hwfn, false,
1093 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1094 out_params.load_code);
1098 p_params->load_code = out_params.load_code;
1100 return ECORE_SUCCESS;
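/* Usage sketch (illustrative): a typical caller (e.g. ecore_hw_init()) is
 * expected to do roughly:
 *
 *	struct ecore_load_req_params load_req_params;
 *
 *	OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
 *	load_req_params.drv_role = ECORE_DRV_ROLE_OS;
 *	load_req_params.avoid_eng_reset = false;
 *	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
 *
 * and then branch on load_req_params.load_code (ENGINE/PORT/FUNCTION) to
 * decide how much of the device it is responsible for initializing.
 */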
1103 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1104 struct ecore_ptt *p_ptt)
1106 u32 wol_param, mcp_resp, mcp_param;
1108 switch (p_hwfn->p_dev->wol_config) {
1109 case ECORE_OV_WOL_DISABLED:
1110 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1112 case ECORE_OV_WOL_ENABLED:
1113 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1116 DP_NOTICE(p_hwfn, true,
1117 "Unknown WoL configuration %02x\n",
1118 p_hwfn->p_dev->wol_config);
1120 case ECORE_OV_WOL_DEFAULT:
1121 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1124 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1125 &mcp_resp, &mcp_param);
1128 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1129 struct ecore_ptt *p_ptt)
1131 struct ecore_mcp_mb_params mb_params;
1132 struct mcp_mac wol_mac;
1134 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1135 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1137 /* Set the primary MAC if WoL is enabled */
1138 if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
1139 u8 *p_mac = p_hwfn->p_dev->wol_mac;
1141 OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
1142 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1143 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1144 p_mac[4] << 8 | p_mac[5];
1146 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
1147 "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
1148 p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
1149 p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);
1151 mb_params.p_data_src = &wol_mac;
1152 mb_params.data_src_size = sizeof(wol_mac);
1155 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
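/* Packing example for the WoL MAC above: for a MAC of 00:11:22:33:44:55,
 * mac_upper ends up as 0x0011 and mac_lower as 0x22334455, i.e. the bytes
 * keep their network order within the two shmem words.
 */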
1158 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1159 struct ecore_ptt *p_ptt)
1161 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1163 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1164 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1165 ECORE_PATH_ID(p_hwfn));
1166 u32 disabled_vfs[VF_MAX_STATIC / 32];
1169 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1170 "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1171 mfw_path_offsize, path_addr);
1173 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1174 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1176 OFFSETOF(struct public_path,
1179 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1180 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1181 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1184 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1185 OSAL_VF_FLR_UPDATE(p_hwfn);
1188 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1189 struct ecore_ptt *p_ptt,
1192 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1194 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1195 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1197 struct ecore_mcp_mb_params mb_params;
1198 enum _ecore_status_t rc;
1201 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1202 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1203 "Acking VFs [%08x,...,%08x] - %08x\n",
1204 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1206 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1207 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1208 mb_params.p_data_src = vfs_to_ack;
1209 mb_params.data_src_size = VF_MAX_STATIC / 8;
1210 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1211 if (rc != ECORE_SUCCESS) {
1212 DP_NOTICE(p_hwfn, false,
1213 "Failed to pass ACK for VF flr to MFW\n");
1214 return ECORE_TIMEOUT;
1217 /* TMP - clear the ACK bits; should be done by MFW */
1218 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1219 ecore_wr(p_hwfn, p_ptt,
1221 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1222 i * sizeof(u32), 0);
1227 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1228 struct ecore_ptt *p_ptt)
1230 u32 transceiver_state;
1232 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1233 p_hwfn->mcp_info->port_addr +
1234 OFFSETOF(struct public_port,
1237 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1238 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1239 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1240 OFFSETOF(struct public_port,
1241 transceiver_data)));
1243 transceiver_state = GET_MFW_FIELD(transceiver_state,
1244 ETH_TRANSCEIVER_STATE);
1246 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1247 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1249 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1252 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1253 struct ecore_ptt *p_ptt,
1254 struct ecore_mcp_link_state *p_link)
1256 u32 eee_status, val;
1258 p_link->eee_adv_caps = 0;
1259 p_link->eee_lp_adv_caps = 0;
1260 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1261 OFFSETOF(struct public_port, eee_status));
1262 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1263 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1264 if (val & EEE_1G_ADV)
1265 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1266 if (val & EEE_10G_ADV)
1267 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1268 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1269 if (val & EEE_1G_ADV)
1270 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1271 if (val & EEE_10G_ADV)
1272 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1275 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1276 struct ecore_ptt *p_ptt,
1279 struct ecore_mcp_link_state *p_link;
1283 /* Prevent SW/attentions from doing this at the same time */
1284 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1286 p_link = &p_hwfn->mcp_info->link_output;
1287 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1289 status = ecore_rd(p_hwfn, p_ptt,
1290 p_hwfn->mcp_info->port_addr +
1291 OFFSETOF(struct public_port, link_status));
1292 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1293 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1294 status, (u32)(p_hwfn->mcp_info->port_addr +
1295 OFFSETOF(struct public_port, link_status)));
1297 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1298 "Resetting link indications\n");
1302 if (p_hwfn->b_drv_link_init)
1303 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1305 p_link->link_up = false;
1307 p_link->full_duplex = true;
1308 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1309 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1310 p_link->speed = 100000;
1312 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1313 p_link->speed = 50000;
1315 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1316 p_link->speed = 40000;
1318 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1319 p_link->speed = 25000;
1321 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1322 p_link->speed = 20000;
1324 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1325 p_link->speed = 10000;
1327 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1328 p_link->full_duplex = false;
1330 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1331 p_link->speed = 1000;
	/* We never store the total line speed, since p_link->speed is
	 * changed again later according to bandwidth allocation.
	 */
1340 if (p_link->link_up && p_link->speed)
1341 p_link->line_speed = p_link->speed;
1343 p_link->line_speed = 0;
1345 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1346 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1348 /* Max bandwidth configuration */
1349 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1351 /* Min bandwidth configuration */
1352 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1353 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1354 p_link->min_pf_rate);
1356 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1357 p_link->an_complete = !!(status &
1358 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1359 p_link->parallel_detection = !!(status &
1360 LINK_STATUS_PARALLEL_DETECTION_USED);
1361 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1363 p_link->partner_adv_speed |=
1364 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1365 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1366 p_link->partner_adv_speed |=
1367 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1368 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1369 p_link->partner_adv_speed |=
1370 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1371 ECORE_LINK_PARTNER_SPEED_10G : 0;
1372 p_link->partner_adv_speed |=
1373 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1374 ECORE_LINK_PARTNER_SPEED_20G : 0;
1375 p_link->partner_adv_speed |=
1376 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1377 ECORE_LINK_PARTNER_SPEED_25G : 0;
1378 p_link->partner_adv_speed |=
1379 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1380 ECORE_LINK_PARTNER_SPEED_40G : 0;
1381 p_link->partner_adv_speed |=
1382 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1383 ECORE_LINK_PARTNER_SPEED_50G : 0;
1384 p_link->partner_adv_speed |=
1385 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1386 ECORE_LINK_PARTNER_SPEED_100G : 0;
1388 p_link->partner_tx_flow_ctrl_en =
1389 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1390 p_link->partner_rx_flow_ctrl_en =
1391 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1393 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1394 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1395 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1397 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1398 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1400 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1401 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1404 p_link->partner_adv_pause = 0;
1407 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1409 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1410 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1412 OSAL_LINK_UPDATE(p_hwfn);
1414 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1417 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1418 struct ecore_ptt *p_ptt,
1421 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1422 struct ecore_mcp_mb_params mb_params;
1423 struct eth_phy_cfg phy_cfg;
1424 enum _ecore_status_t rc = ECORE_SUCCESS;
1428 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1429 return ECORE_SUCCESS;
1432 /* Set the shmem configuration according to params */
1433 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1434 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1435 if (!params->speed.autoneg)
1436 phy_cfg.speed = params->speed.forced_speed;
1437 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1438 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1439 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1440 phy_cfg.adv_speed = params->speed.advertised_speeds;
1441 phy_cfg.loopback_mode = params->loopback_mode;
1442 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
1443 if (params->eee.enable)
1444 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1445 if (params->eee.tx_lpi_enable)
1446 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1447 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1448 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1449 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1450 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1451 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1452 EEE_TX_TIMER_USEC_OFFSET) &
1453 EEE_TX_TIMER_USEC_MASK;
1456 p_hwfn->b_drv_link_init = b_up;
1459 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1460 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1461 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1462 phy_cfg.loopback_mode);
1464 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1466 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1467 mb_params.cmd = cmd;
1468 mb_params.p_data_src = &phy_cfg;
1469 mb_params.data_src_size = sizeof(phy_cfg);
1470 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1472 /* if mcp fails to respond we must abort */
1473 if (rc != ECORE_SUCCESS) {
1474 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1478 /* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
1481 * - On initialization, older MFWs might not indicate link change
1482 * during LFA, so we'll never get an UP indication.
1484 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1489 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1490 struct ecore_ptt *p_ptt)
1492 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1494 /* TODO - Add support for VFs */
1495 if (IS_VF(p_hwfn->p_dev))
1498 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1500 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1501 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1503 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1505 OFFSETOF(struct public_path, process_kill)) &
1506 PROCESS_KILL_COUNTER_MASK;
1508 return proc_kill_cnt;
1511 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1512 struct ecore_ptt *p_ptt)
1514 struct ecore_dev *p_dev = p_hwfn->p_dev;
1517 /* Prevent possible attentions/interrupts during the recovery handling
1518 * and till its load phase, during which they will be re-enabled.
1520 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1522 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1524 /* The following operations should be done once, and thus in CMT mode
1525 * are carried out by only the first HW function.
1527 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1530 if (p_dev->recov_in_prog) {
1531 DP_NOTICE(p_hwfn, false,
1532 "Ignoring the indication since a recovery process is already in progress\n");
1536 p_dev->recov_in_prog = true;
1538 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1539 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1541 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1544 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1545 struct ecore_ptt *p_ptt,
1546 enum MFW_DRV_MSG_TYPE type)
1548 enum ecore_mcp_protocol_type stats_type;
1549 union ecore_mcp_protocol_stats stats;
1550 struct ecore_mcp_mb_params mb_params;
1552 enum _ecore_status_t rc;
1555 case MFW_DRV_MSG_GET_LAN_STATS:
1556 stats_type = ECORE_MCP_LAN_STATS;
1557 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1559 case MFW_DRV_MSG_GET_FCOE_STATS:
1560 stats_type = ECORE_MCP_FCOE_STATS;
1561 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1563 case MFW_DRV_MSG_GET_ISCSI_STATS:
1564 stats_type = ECORE_MCP_ISCSI_STATS;
1565 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1567 case MFW_DRV_MSG_GET_RDMA_STATS:
1568 stats_type = ECORE_MCP_RDMA_STATS;
1569 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1572 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
1576 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1578 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1579 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1580 mb_params.param = hsi_param;
1581 mb_params.p_data_src = &stats;
1582 mb_params.data_src_size = sizeof(stats);
1583 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1584 if (rc != ECORE_SUCCESS)
1585 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1588 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1589 struct public_func *p_shmem_info)
1591 struct ecore_mcp_function_info *p_info;
1593 p_info = &p_hwfn->mcp_info->func_info;
	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and clamp the value to min `1' and max `100'
	 * if it isn't in range.
	 */
1601 p_info->bandwidth_min = (p_shmem_info->config &
1602 FUNC_MF_CFG_MIN_BW_MASK) >>
1603 FUNC_MF_CFG_MIN_BW_OFFSET;
1604 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1606 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1607 p_info->bandwidth_min);
1608 p_info->bandwidth_min = 1;
1611 p_info->bandwidth_max = (p_shmem_info->config &
1612 FUNC_MF_CFG_MAX_BW_MASK) >>
1613 FUNC_MF_CFG_MAX_BW_OFFSET;
1614 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1616 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1617 p_info->bandwidth_max);
1618 p_info->bandwidth_max = 100;
1622 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1623 struct ecore_ptt *p_ptt,
1624 struct public_func *p_data,
1627 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1629 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1630 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1633 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1635 size = OSAL_MIN_T(u32, sizeof(*p_data),
1636 SECTION_SIZE(mfw_path_offsize));
1637 for (i = 0; i < size / sizeof(u32); i++)
1638 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1639 func_addr + (i << 2));
1644 /* This was introduced with FW 8.10.5.0; Hopefully this is only temp. */
1645 enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
1646 struct ecore_ptt *p_ptt,
1649 struct public_func shmem_info;
1652 /* Find first Ethernet interface in port */
1653 for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
1654 i += p_hwfn->p_dev->num_ports_in_engine) {
1655 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1656 MCP_PF_ID_BY_REL(p_hwfn, i));
1658 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1661 if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
1662 FUNC_MF_CFG_PROTOCOL_ETHERNET) {
1664 return ECORE_SUCCESS;
	/* This might actually be valid somewhere in the future but for now
	 * it's highly unlikely.
	 */
	DP_NOTICE(p_hwfn, false,
		  "Failed to find an Ethernet interface on the port in MF_SI mode\n");
1678 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1680 struct ecore_mcp_function_info *p_info;
1681 struct public_func shmem_info;
1682 u32 resp = 0, param = 0;
1684 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1687 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1689 p_info = &p_hwfn->mcp_info->func_info;
1691 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1693 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1695 /* Acknowledge the MFW */
1696 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1700 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1701 struct ecore_ptt *p_ptt)
1703 struct public_func shmem_info;
1704 u32 resp = 0, param = 0;
1706 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1709 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1710 FUNC_MF_CFG_OV_STAG_MASK;
1711 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1712 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1713 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1714 ecore_wr(p_hwfn, p_ptt,
1715 NIG_REG_LLH_FUNC_TAG_VALUE,
1716 p_hwfn->hw_info.ovlan);
1717 ecore_sp_pf_update_stag(p_hwfn);
1720 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1722 /* Acknowledge the MFW */
1723 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1727 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1729 /* A single notification should be sent to upper driver in CMT mode */
1730 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1733 DP_NOTICE(p_hwfn, false,
1734 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1736 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1739 struct ecore_mdump_cmd_params {
1748 static enum _ecore_status_t
1749 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1750 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1752 struct ecore_mcp_mb_params mb_params;
1753 enum _ecore_status_t rc;
1755 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1756 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1757 mb_params.param = p_mdump_cmd_params->cmd;
1758 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1759 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1760 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1761 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1762 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1763 if (rc != ECORE_SUCCESS)
1766 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1768 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1770 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1771 p_mdump_cmd_params->cmd);
1773 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1775 "The mdump command is not supported by the MFW\n");
1782 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1783 struct ecore_ptt *p_ptt)
1785 struct ecore_mdump_cmd_params mdump_cmd_params;
1787 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1788 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1790 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1793 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1794 struct ecore_ptt *p_ptt,
1797 struct ecore_mdump_cmd_params mdump_cmd_params;
1799 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1800 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1801 mdump_cmd_params.p_data_src = &epoch;
1802 mdump_cmd_params.data_src_size = sizeof(epoch);
1804 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1807 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1808 struct ecore_ptt *p_ptt)
1810 struct ecore_mdump_cmd_params mdump_cmd_params;
1812 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1813 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1815 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1818 static enum _ecore_status_t
1819 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1820 struct mdump_config_stc *p_mdump_config)
1822 struct ecore_mdump_cmd_params mdump_cmd_params;
1823 enum _ecore_status_t rc;
1825 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1826 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1827 mdump_cmd_params.p_data_dst = p_mdump_config;
1828 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1830 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1831 if (rc != ECORE_SUCCESS)
1834 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1836 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1837 mdump_cmd_params.mcp_resp);
1838 rc = ECORE_UNKNOWN_ERROR;
1844 enum _ecore_status_t
1845 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1846 struct ecore_mdump_info *p_mdump_info)
1848 u32 addr, global_offsize, global_addr;
1849 struct mdump_config_stc mdump_config;
1850 enum _ecore_status_t rc;
1852 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1854 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1856 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1857 global_addr = SECTION_ADDR(global_offsize, 0);
1858 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1860 OFFSETOF(struct public_global,
1863 if (p_mdump_info->reason) {
1864 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1865 if (rc != ECORE_SUCCESS)
1868 p_mdump_info->version = mdump_config.version;
1869 p_mdump_info->config = mdump_config.config;
1870 p_mdump_info->epoch = mdump_config.epoc;
1871 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1872 p_mdump_info->valid_logs = mdump_config.valid_logs;
1874 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1875 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1876 p_mdump_info->reason, p_mdump_info->version,
1877 p_mdump_info->config, p_mdump_info->epoch,
1878 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1880 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1881 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1884 return ECORE_SUCCESS;
1887 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1888 struct ecore_ptt *p_ptt)
1890 struct ecore_mdump_cmd_params mdump_cmd_params;
1892 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1893 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1895 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1898 enum _ecore_status_t
1899 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1900 struct ecore_mdump_retain_data *p_mdump_retain)
1902 struct ecore_mdump_cmd_params mdump_cmd_params;
1903 struct mdump_retain_data_stc mfw_mdump_retain;
1904 enum _ecore_status_t rc;
1906 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1907 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1908 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1909 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1911 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1912 if (rc != ECORE_SUCCESS)
1915 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1917 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1918 mdump_cmd_params.mcp_resp);
1919 return ECORE_UNKNOWN_ERROR;
1922 p_mdump_retain->valid = mfw_mdump_retain.valid;
1923 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1924 p_mdump_retain->pf = mfw_mdump_retain.pf;
1925 p_mdump_retain->status = mfw_mdump_retain.status;
1927 return ECORE_SUCCESS;
1930 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1931 struct ecore_ptt *p_ptt)
1933 struct ecore_mdump_cmd_params mdump_cmd_params;
1935 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1936 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1938 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1941 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1942 struct ecore_ptt *p_ptt)
1944 struct ecore_mdump_retain_data mdump_retain;
1945 enum _ecore_status_t rc;
1947 /* In CMT mode - no need for more than a single acknowledgement to the
1948 * MFW, and no more than a single notification to the upper driver.
1950 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1953 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1954 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1955 DP_NOTICE(p_hwfn, false,
1956 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1957 mdump_retain.epoch, mdump_retain.pf,
1958 mdump_retain.status);
1960 DP_NOTICE(p_hwfn, false,
1961 "The MFW notified that a critical error occurred in the device\n");
1964 if (p_hwfn->p_dev->allow_mdump) {
1965 DP_NOTICE(p_hwfn, false,
1966 "Not acknowledging the notification to allow the MFW crash dump\n");
1970 DP_NOTICE(p_hwfn, false,
1971 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1972 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1973 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
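/* Dispatch loop for asynchronous MFW notifications: the driver mailbox is
 * read, each message dword is compared against the shadow copy, and every
 * changed message is handed to its handler (link change, VF FLR, DCBX MIB
 * updates, statistics requests, etc.). All messages are then acknowledged
 * back to the MFW in big-endian format and the shadow copy is updated.
 */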
1976 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1977 struct ecore_ptt *p_ptt)
1979 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1980 enum _ecore_status_t rc = ECORE_SUCCESS;
1984 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1986 /* Read Messages from MFW */
1987 ecore_mcp_read_mb(p_hwfn, p_ptt);
1989 /* Compare current messages to old ones */
1990 for (i = 0; i < info->mfw_mb_length; i++) {
1991 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1996 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1997 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1998 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2001 case MFW_DRV_MSG_LINK_CHANGE:
2002 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2004 case MFW_DRV_MSG_VF_DISABLED:
2005 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2007 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2008 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2009 ECORE_DCBX_REMOTE_LLDP_MIB);
2011 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2012 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2013 ECORE_DCBX_REMOTE_MIB);
2015 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2016 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2017 ECORE_DCBX_OPERATIONAL_MIB);
2019 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2020 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2022 case MFW_DRV_MSG_ERROR_RECOVERY:
2023 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2025 case MFW_DRV_MSG_GET_LAN_STATS:
2026 case MFW_DRV_MSG_GET_FCOE_STATS:
2027 case MFW_DRV_MSG_GET_ISCSI_STATS:
2028 case MFW_DRV_MSG_GET_RDMA_STATS:
2029 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2031 case MFW_DRV_MSG_BW_UPDATE:
2032 ecore_mcp_update_bw(p_hwfn, p_ptt);
2034 case MFW_DRV_MSG_S_TAG_UPDATE:
2035 ecore_mcp_update_stag(p_hwfn, p_ptt);
2037 case MFW_DRV_MSG_FAILURE_DETECTED:
2038 ecore_mcp_handle_fan_failure(p_hwfn);
2040 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2041 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2043 case MFW_DRV_MSG_GET_TLV_REQ:
2044 OSAL_MFW_TLV_REQ(p_hwfn);
2047 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2052 /* ACK everything */
2053 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2054 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2056 /* The MFW expects the answer in BE, so force the write in that format */
2057 ecore_wr(p_hwfn, p_ptt,
2058 info->mfw_mb_addr + sizeof(u32) +
2059 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2060 sizeof(u32) + i * sizeof(u32), val);
2064 DP_NOTICE(p_hwfn, false,
2065 "Received an MFW message indication but no new message!\n");
2069 /* Copy the new mfw messages into the shadow */
2070 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2075 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2076 struct ecore_ptt *p_ptt,
2078 u32 *p_running_bundle_id)
2083 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2084 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2085 return ECORE_SUCCESS;
2089 if (IS_VF(p_hwfn->p_dev)) {
2090 if (p_hwfn->vf_iov_info) {
2091 struct pfvf_acquire_resp_tlv *p_resp;
2093 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2094 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2095 return ECORE_SUCCESS;
2097 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2098 "VF requested MFW version prior to ACQUIRE\n");
2103 global_offsize = ecore_rd(p_hwfn, p_ptt,
2104 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
2106 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2107 SECTION_ADDR(global_offsize, 0) +
2108 OFFSETOF(struct public_global, mfw_ver));
2110 if (p_running_bundle_id != OSAL_NULL) {
2111 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2112 SECTION_ADDR(global_offsize, 0) +
2113 OFFSETOF(struct public_global,
2114 running_bundle_id));
2117 return ECORE_SUCCESS;
2120 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2121 struct ecore_ptt *p_ptt,
2124 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2127 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2128 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2129 return ECORE_SUCCESS;
2133 if (IS_VF(p_hwfn->p_dev))
2136 /* Read the address of the nvm_cfg */
2137 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2138 if (!nvm_cfg_addr) {
2139 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2143 /* Read the offset of nvm_cfg1 */
2144 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2146 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2147 OFFSETOF(struct nvm_cfg1, glob) +
2148 OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2149 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2150 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2151 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2152 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2154 return ECORE_SUCCESS;
2157 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2160 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2161 struct ecore_ptt *p_ptt;
2163 /* TODO - Add support for VFs */
2167 if (!ecore_mcp_is_init(p_hwfn)) {
2168 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2172 *p_media_type = MEDIA_UNSPECIFIED;
2174 p_ptt = ecore_ptt_acquire(p_hwfn);
2178 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2179 OFFSETOF(struct public_port, media_type));
2181 ecore_ptt_release(p_hwfn, p_ptt);
2183 return ECORE_SUCCESS;
2186 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2188 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2189 enum ecore_pci_personality *p_proto)
2191 /* No legacy MFW ever published iWARP support, so at this point
2192 * this is either plain L2 or RoCE.
2194 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2195 &p_hwfn->hw_info.device_capabilities))
2196 *p_proto = ECORE_PCI_ETH_ROCE;
2198 *p_proto = ECORE_PCI_ETH;
2200 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2201 "According to Legacy capabilities, L2 personality is %08x\n",
2205 static enum _ecore_status_t
2206 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2207 struct ecore_ptt *p_ptt,
2208 enum ecore_pci_personality *p_proto)
2210 u32 resp = 0, param = 0;
2211 enum _ecore_status_t rc;
2213 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2214 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2215 if (rc != ECORE_SUCCESS)
2217 if (resp != FW_MSG_CODE_OK) {
2218 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2219 "MFW lacks support for command; Returns %08x\n",
2225 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2226 *p_proto = ECORE_PCI_ETH;
2228 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2229 *p_proto = ECORE_PCI_ETH_ROCE;
2231 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2232 *p_proto = ECORE_PCI_ETH_IWARP;
2234 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2235 *p_proto = ECORE_PCI_ETH_RDMA;
2238 DP_NOTICE(p_hwfn, true,
2239 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2244 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2245 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2246 (u32) *p_proto, resp, param);
2247 return ECORE_SUCCESS;
2250 static enum _ecore_status_t
2251 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2252 struct public_func *p_info,
2253 struct ecore_ptt *p_ptt,
2254 enum ecore_pci_personality *p_proto)
2256 enum _ecore_status_t rc = ECORE_SUCCESS;
2258 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2259 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2260 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2262 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2264 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2265 *p_proto = ECORE_PCI_ISCSI;
2267 case FUNC_MF_CFG_PROTOCOL_FCOE:
2268 *p_proto = ECORE_PCI_FCOE;
2270 case FUNC_MF_CFG_PROTOCOL_ROCE:
2271 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2280 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2281 struct ecore_ptt *p_ptt)
2283 struct ecore_mcp_function_info *info;
2284 struct public_func shmem_info;
2286 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2288 info = &p_hwfn->mcp_info->func_info;
2290 info->pause_on_host = (shmem_info.config &
2291 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2293 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2295 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2296 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2300 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
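/* shmem packs the MAC as two words: the low 16 bits of mac_upper hold
 * bytes 0-1 and mac_lower holds bytes 2-5. E.g. 00:11:22:33:44:55 would
 * arrive as mac_upper 0x0011 and mac_lower 0x22334455 (values are for
 * illustration only).
 */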
2302 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2303 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2304 info->mac[1] = (u8)(shmem_info.mac_upper);
2305 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2306 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2307 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2308 info->mac[5] = (u8)(shmem_info.mac_lower);
2310 /* Store primary MAC for later possible WoL */
2311 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2314 /* TODO - are there protocols for which there's no MAC? */
2315 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2318 /* TODO - are these calculations true for a BE machine? */
2319 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2320 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2321 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2322 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2324 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2326 info->mtu = (u16)shmem_info.mtu_size;
2328 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2329 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2330 if (ecore_mcp_is_init(p_hwfn)) {
2331 u32 resp = 0, param = 0;
2332 enum _ecore_status_t rc;
2334 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2335 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2336 if (rc != ECORE_SUCCESS)
2338 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2339 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2342 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2343 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2344 info->pause_on_host, info->protocol,
2345 info->bandwidth_min, info->bandwidth_max,
2346 info->mac[0], info->mac[1], info->mac[2],
2347 info->mac[3], info->mac[4], info->mac[5],
2348 (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan,
2349 (u8)p_hwfn->hw_info.b_wol_support);
2351 return ECORE_SUCCESS;
2354 struct ecore_mcp_link_params
2355 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2357 if (!p_hwfn || !p_hwfn->mcp_info)
2359 return &p_hwfn->mcp_info->link_input;
2362 struct ecore_mcp_link_state
2363 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2365 if (!p_hwfn || !p_hwfn->mcp_info)
2369 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2370 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2371 p_hwfn->mcp_info->link_output.link_up = true;
2375 return &p_hwfn->mcp_info->link_output;
2378 struct ecore_mcp_link_capabilities
2379 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2381 if (!p_hwfn || !p_hwfn->mcp_info)
2383 return &p_hwfn->mcp_info->link_capabilities;
2386 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2387 struct ecore_ptt *p_ptt)
2389 u32 resp = 0, param = 0;
2390 enum _ecore_status_t rc;
2392 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2393 DRV_MSG_CODE_NIG_DRAIN, 1000,
2396 /* Wait for the drain to complete before returning */
2402 #ifndef LINUX_REMOVE
2403 const struct ecore_mcp_function_info
2404 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2406 if (!p_hwfn || !p_hwfn->mcp_info)
2408 return &p_hwfn->mcp_info->func_info;
2411 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2412 struct ecore_ptt *p_ptt,
2415 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2416 struct public_func shmem_info;
2417 int i, count = 0, num_pfs;
2419 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2421 for (i = 0; i < num_pfs; i++) {
2422 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2423 MCP_PF_ID_BY_REL(p_hwfn, i));
2424 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2427 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2432 if ((1 << ((u32)protocol)) & personalities)
2440 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2441 struct ecore_ptt *p_ptt,
2447 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2448 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2453 if (IS_VF(p_hwfn->p_dev))
2456 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2457 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2458 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2459 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2461 *p_flash_size = flash_size;
2463 return ECORE_SUCCESS;
2466 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2467 struct ecore_ptt *p_ptt)
2469 struct ecore_dev *p_dev = p_hwfn->p_dev;
2471 if (p_dev->recov_in_prog) {
2472 DP_NOTICE(p_hwfn, false,
2473 "Avoid triggering a recovery since such a process is already in progress\n");
2477 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2478 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2480 return ECORE_SUCCESS;
2483 static enum _ecore_status_t
2484 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2485 struct ecore_ptt *p_ptt,
2488 u32 resp = 0, param = 0, rc_param = 0;
2489 enum _ecore_status_t rc;
2491 /* Only the leader can configure MSI-X, and CMT needs to be taken into account */
2492 if (!IS_LEAD_HWFN(p_hwfn))
2493 return ECORE_SUCCESS;
2494 num *= p_hwfn->p_dev->num_hwfns;
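/* Illustration only: in CMT with 2 HW functions, a request for 4 MSI-X
 * vectors per engine is doubled to 8 before being sent to the MFW.
 */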
2496 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2497 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2498 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2499 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2501 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2504 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2505 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2509 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2510 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2517 static enum _ecore_status_t
2518 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2519 struct ecore_ptt *p_ptt,
2522 u32 resp = 0, param = num, rc_param = 0;
2523 enum _ecore_status_t rc;
2525 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2526 param, &resp, &rc_param);
2528 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2529 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2532 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2533 "Requested 0x%02x MSI-x interrupts for VFs\n",
2540 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2541 struct ecore_ptt *p_ptt,
2544 if (ECORE_IS_BB(p_hwfn->p_dev))
2545 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2547 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2550 enum _ecore_status_t
2551 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2552 struct ecore_mcp_drv_version *p_ver)
2554 struct ecore_mcp_mb_params mb_params;
2555 struct drv_version_stc drv_version;
2559 enum _ecore_status_t rc;
2562 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2563 return ECORE_SUCCESS;
2566 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2567 drv_version.version = p_ver->version;
2568 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2569 for (i = 0; i < num_words; i++) {
2570 /* The driver name is expected to be in a big-endian format */
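/* Illustration: OSAL_CPU_TO_BE32() byte-swaps each 4-character chunk of
 * the name on LE hosts, so the big-endian MFW sees the characters in
 * their original order.
 */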
2571 p_name = &p_ver->name[i * sizeof(u32)];
2572 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2573 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2576 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2577 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2578 mb_params.p_data_src = &drv_version;
2579 mb_params.data_src_size = sizeof(drv_version);
2580 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2581 if (rc != ECORE_SUCCESS)
2582 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2587 /* A maximum waiting time of 100 msec for the MCP to halt */
2588 #define ECORE_MCP_HALT_SLEEP_MS 10
2589 #define ECORE_MCP_HALT_MAX_RETRIES 10
2591 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2592 struct ecore_ptt *p_ptt)
2594 u32 resp = 0, param = 0, cpu_mode, cnt = 0;
2595 enum _ecore_status_t rc;
2597 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2599 if (rc != ECORE_SUCCESS) {
2600 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2605 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2606 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2607 if (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT)
2609 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2611 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2612 DP_NOTICE(p_hwfn, false,
2613 "Failed to halt the MCP [CPU_MODE = 0x%08x after %d msec]\n",
2614 cpu_mode, cnt * ECORE_MCP_HALT_SLEEP_MS);
2618 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2620 return ECORE_SUCCESS;
2623 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2624 struct ecore_ptt *p_ptt)
2626 u32 value, cpu_mode;
2628 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2630 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2631 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2632 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2633 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2635 if (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) {
2636 DP_NOTICE(p_hwfn, false,
2637 "Failed to resume the MCP [CPU_MODE = 0x%08x]\n",
2642 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2644 return ECORE_SUCCESS;
2647 enum _ecore_status_t
2648 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2649 struct ecore_ptt *p_ptt,
2650 enum ecore_ov_client client)
2652 u32 resp = 0, param = 0;
2654 enum _ecore_status_t rc;
2657 case ECORE_OV_CLIENT_DRV:
2658 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2660 case ECORE_OV_CLIENT_USER:
2661 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2663 case ECORE_OV_CLIENT_VENDOR_SPEC:
2664 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2667 DP_NOTICE(p_hwfn, true,
2668 "Invalid client type %d\n", client);
2672 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2673 drv_mb_param, &resp, &param);
2674 if (rc != ECORE_SUCCESS)
2675 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2680 enum _ecore_status_t
2681 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2682 struct ecore_ptt *p_ptt,
2683 enum ecore_ov_driver_state drv_state)
2685 u32 resp = 0, param = 0;
2687 enum _ecore_status_t rc;
2689 switch (drv_state) {
2690 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2691 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2693 case ECORE_OV_DRIVER_STATE_DISABLED:
2694 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2696 case ECORE_OV_DRIVER_STATE_ACTIVE:
2697 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2700 DP_NOTICE(p_hwfn, true,
2701 "Invalid driver state %d\n", drv_state);
2705 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2706 drv_mb_param, &resp, &param);
2707 if (rc != ECORE_SUCCESS)
2708 DP_ERR(p_hwfn, "Failed to send driver state\n");
2713 enum _ecore_status_t
2714 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2715 struct ecore_fc_npiv_tbl *p_table)
2717 struct dci_fc_npiv_tbl *p_npiv_table;
2718 u8 *p_buf = OSAL_NULL;
2720 enum _ecore_status_t rc = ECORE_SUCCESS;
2722 p_table->num_wwpn = 0;
2723 p_table->num_wwnn = 0;
2724 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2725 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
2726 if (addr == NPIV_TBL_INVALID_ADDR) {
2727 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2731 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2732 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
2734 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2738 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2740 DP_ERR(p_hwfn, "Buffer allocation failed\n");
2744 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2745 if (rc != ECORE_SUCCESS) {
2746 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2750 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2751 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2752 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2753 for (i = 0; i < p_table->num_wwpn; i++) {
2754 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2756 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2760 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2762 return ECORE_SUCCESS;
2765 enum _ecore_status_t
2766 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2769 u32 resp = 0, param = 0;
2771 enum _ecore_status_t rc;
2773 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_OFFSET;
2774 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2775 drv_mb_param, &resp, &param);
2776 if (rc != ECORE_SUCCESS)
2777 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2782 enum _ecore_status_t
2783 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2786 struct ecore_mcp_mb_params mb_params;
2788 enum _ecore_status_t rc;
2790 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2791 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2792 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2793 DRV_MSG_CODE_VMAC_TYPE_OFFSET;
2794 mb_params.param |= MCP_PF_ID(p_hwfn);
2796 /* The MCP is BE, and on LE platforms PCI swaps SHMEM accesses
2797 * at 32-bit granularity.
2798 * The MAC therefore has to be set in native order [and not byte order],
2799 * otherwise the MFW would read it incorrectly after the swap. */
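/* Illustration (values are examples only): for 00:11:22:33:44:55 this
 * packs mfw_mac[0] = 0x00112233 and mfw_mac[1] = 0x44550000.
 */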
2801 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2802 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2804 mb_params.p_data_src = (u8 *)mfw_mac;
2805 mb_params.data_src_size = 8;
2806 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2807 if (rc != ECORE_SUCCESS)
2808 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2810 /* Store primary MAC for later possible WoL */
2811 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2816 enum _ecore_status_t
2817 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2818 enum ecore_ov_wol wol)
2820 u32 resp = 0, param = 0;
2822 enum _ecore_status_t rc;
2824 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2825 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2826 "Can't change WoL configuration when WoL isn't supported\n");
2831 case ECORE_OV_WOL_DEFAULT:
2832 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2834 case ECORE_OV_WOL_DISABLED:
2835 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2837 case ECORE_OV_WOL_ENABLED:
2838 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2841 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2845 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2846 drv_mb_param, &resp, &param);
2847 if (rc != ECORE_SUCCESS)
2848 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2850 /* Store the WoL update for a future unload */
2851 p_hwfn->p_dev->wol_config = (u8)wol;
2856 enum _ecore_status_t
2857 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2858 enum ecore_ov_eswitch eswitch)
2860 u32 resp = 0, param = 0;
2862 enum _ecore_status_t rc;
2865 case ECORE_OV_ESWITCH_NONE:
2866 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2868 case ECORE_OV_ESWITCH_VEB:
2869 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2871 case ECORE_OV_ESWITCH_VEPA:
2872 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2875 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2879 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2880 drv_mb_param, &resp, &param);
2881 if (rc != ECORE_SUCCESS)
2882 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2887 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2888 struct ecore_ptt *p_ptt,
2889 enum ecore_led_mode mode)
2891 u32 resp = 0, param = 0, drv_mb_param;
2892 enum _ecore_status_t rc;
2895 case ECORE_LED_MODE_ON:
2896 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2898 case ECORE_LED_MODE_OFF:
2899 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2901 case ECORE_LED_MODE_RESTORE:
2902 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2905 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2909 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2910 drv_mb_param, &resp, &param);
2911 if (rc != ECORE_SUCCESS)
2912 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2917 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2918 struct ecore_ptt *p_ptt,
2921 u32 resp = 0, param = 0;
2922 enum _ecore_status_t rc;
2924 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2925 mask_parities, &resp, &param);
2927 if (rc != ECORE_SUCCESS) {
2928 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2929 } else if (resp != FW_MSG_CODE_OK) {
2930 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
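/* Read an arbitrary NVM region through the MFW mailbox. The request is split
 * into MCP_DRV_NVM_BUF_LEN-sized chunks, with the chunk length packed into
 * the upper bits of the offset parameter (DRV_MB_PARAM_NVM_LEN_OFFSET).
 *
 * A rough usage sketch (the buffer size and offset are illustrative only):
 *
 *	u8 buf[0x100];
 *
 *	if (ecore_mcp_nvm_read(p_dev, 0x0, buf, sizeof(buf)) != ECORE_SUCCESS)
 *		DP_NOTICE(p_dev, false, "NVM read failed\n");
 */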
2937 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2940 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2941 u32 bytes_left, offset, bytes_to_copy, buf_size;
2942 u32 nvm_offset, resp, param;
2943 struct ecore_ptt *p_ptt;
2944 enum _ecore_status_t rc = ECORE_SUCCESS;
2946 p_ptt = ecore_ptt_acquire(p_hwfn);
2952 while (bytes_left > 0) {
2953 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2954 MCP_DRV_NVM_BUF_LEN);
2955 nvm_offset = (addr + offset) | (bytes_to_copy <<
2956 DRV_MB_PARAM_NVM_LEN_OFFSET);
2957 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2958 DRV_MSG_CODE_NVM_READ_NVRAM,
2959 nvm_offset, &resp, &param, &buf_size,
2960 (u32 *)(p_buf + offset));
2961 if (rc != ECORE_SUCCESS || (resp != FW_MSG_CODE_NVM_OK)) {
2962 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2966 /* This can be a lengthy process, and the scheduler might not be
2967 * preemptible. Sleep a bit to prevent CPU hogging.
2969 if (bytes_left % 0x1000 <
2970 (bytes_left - buf_size) % 0x1000)
2974 bytes_left -= buf_size;
2977 p_dev->mcp_nvm_resp = resp;
2978 ecore_ptt_release(p_hwfn, p_ptt);
2983 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2984 u32 addr, u8 *p_buf, u32 len)
2986 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2987 struct ecore_ptt *p_ptt;
2989 enum _ecore_status_t rc;
2991 p_ptt = ecore_ptt_acquire(p_hwfn);
2995 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2996 (cmd == ECORE_PHY_CORE_READ) ?
2997 DRV_MSG_CODE_PHY_CORE_READ :
2998 DRV_MSG_CODE_PHY_RAW_READ,
2999 addr, &resp, &param, &len, (u32 *)p_buf);
3000 if (rc != ECORE_SUCCESS)
3001 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3003 p_dev->mcp_nvm_resp = resp;
3004 ecore_ptt_release(p_hwfn, p_ptt);
3009 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3011 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3012 struct ecore_ptt *p_ptt;
3014 p_ptt = ecore_ptt_acquire(p_hwfn);
3018 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3019 ecore_ptt_release(p_hwfn, p_ptt);
3021 return ECORE_SUCCESS;
3024 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
3027 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3028 struct ecore_ptt *p_ptt;
3030 enum _ecore_status_t rc;
3032 p_ptt = ecore_ptt_acquire(p_hwfn);
3035 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3037 p_dev->mcp_nvm_resp = resp;
3038 ecore_ptt_release(p_hwfn, p_ptt);
3043 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3046 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3047 struct ecore_ptt *p_ptt;
3049 enum _ecore_status_t rc;
3051 p_ptt = ecore_ptt_acquire(p_hwfn);
3054 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3056 p_dev->mcp_nvm_resp = resp;
3057 ecore_ptt_release(p_hwfn, p_ptt);
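/* NVM writes follow the same chunking scheme as reads: a file update is
 * typically started with ecore_mcp_nvm_put_file_begin() and the payload is
 * then pushed in MCP_DRV_NVM_BUF_LEN chunks via ecore_mcp_nvm_write() using
 * ECORE_PUT_FILE_DATA (or ECORE_NVM_WRITE_NVRAM for raw writes). This is a
 * general description of the flow, not an additional API.
 */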
3062 /* rc receives ECORE_INVAL as its default value because the while
3063 * loop might not be entered if len is 0
3065 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3066 u32 addr, u8 *p_buf, u32 len)
3068 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3069 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3070 enum _ecore_status_t rc = ECORE_INVAL;
3071 struct ecore_ptt *p_ptt;
3073 p_ptt = ecore_ptt_acquire(p_hwfn);
3078 case ECORE_PUT_FILE_DATA:
3079 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3081 case ECORE_NVM_WRITE_NVRAM:
3082 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3084 case ECORE_EXT_PHY_FW_UPGRADE:
3085 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3088 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3094 while (buf_idx < len) {
3095 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3096 MCP_DRV_NVM_BUF_LEN);
3097 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3100 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3101 &resp, &param, buf_size,
3102 (u32 *)&p_buf[buf_idx]);
3103 if (rc != ECORE_SUCCESS ||
3104 ((resp != FW_MSG_CODE_NVM_OK) &&
3105 (resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
3106 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3108 /* This can be a lengthy process, and the scheduler might not be
3109 * preemptible. Sleep a bit to prevent CPU hogging.
3111 if (buf_idx % 0x1000 >
3112 (buf_idx + buf_size) % 0x1000)
3115 buf_idx += buf_size;
3118 p_dev->mcp_nvm_resp = resp;
3119 ecore_ptt_release(p_hwfn, p_ptt);
3124 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3125 u32 addr, u8 *p_buf, u32 len)
3127 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3128 struct ecore_ptt *p_ptt;
3129 u32 resp, param, nvm_cmd;
3130 enum _ecore_status_t rc;
3132 p_ptt = ecore_ptt_acquire(p_hwfn);
3136 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3137 DRV_MSG_CODE_PHY_RAW_WRITE;
3138 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3139 &resp, &param, len, (u32 *)p_buf);
3140 if (rc != ECORE_SUCCESS)
3141 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3142 p_dev->mcp_nvm_resp = resp;
3143 ecore_ptt_release(p_hwfn, p_ptt);
3148 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3151 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3152 struct ecore_ptt *p_ptt;
3154 enum _ecore_status_t rc;
3156 p_ptt = ecore_ptt_acquire(p_hwfn);
3160 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3162 p_dev->mcp_nvm_resp = resp;
3163 ecore_ptt_release(p_hwfn, p_ptt);
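/* Read from a transceiver (SFP) EEPROM over I2C via the MFW. The port, I2C
 * address, offset and size are packed into the mailbox parameter, and the
 * transfer is split into MAX_I2C_TRANSACTION_SIZE chunks. The MFW reports
 * whether the transceiver is present and whether the diagnostic read
 * succeeded.
 */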
3168 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3169 struct ecore_ptt *p_ptt,
3170 u32 port, u32 addr, u32 offset,
3173 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3175 enum _ecore_status_t rc;
3177 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3178 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3182 while (bytes_left > 0) {
3183 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3184 MAX_I2C_TRANSACTION_SIZE);
3185 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3186 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3187 nvm_offset |= ((addr + offset) <<
3188 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3189 nvm_offset |= (bytes_to_copy <<
3190 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3191 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3192 DRV_MSG_CODE_TRANSCEIVER_READ,
3193 nvm_offset, &resp, &param, &buf_size,
3194 (u32 *)(p_buf + offset));
3195 if (rc != ECORE_SUCCESS) {
3196 DP_NOTICE(p_hwfn, false,
3197 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3202 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3204 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3205 return ECORE_UNKNOWN_ERROR;
3208 bytes_left -= buf_size;
3211 return ECORE_SUCCESS;
3214 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3215 struct ecore_ptt *p_ptt,
3216 u32 port, u32 addr, u32 offset,
3219 u32 buf_idx, buf_size, nvm_offset, resp, param;
3220 enum _ecore_status_t rc;
3222 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3223 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3225 while (buf_idx < len) {
3226 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3227 MAX_I2C_TRANSACTION_SIZE);
3228 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3229 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3230 nvm_offset |= ((offset + buf_idx) <<
3231 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3232 nvm_offset |= (buf_size <<
3233 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3234 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3235 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3236 nvm_offset, &resp, &param, buf_size,
3237 (u32 *)&p_buf[buf_idx]);
3238 if (rc != ECORE_SUCCESS) {
3239 DP_NOTICE(p_hwfn, false,
3240 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3245 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3247 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3248 return ECORE_UNKNOWN_ERROR;
3250 buf_idx += buf_size;
3253 return ECORE_SUCCESS;
3256 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3257 struct ecore_ptt *p_ptt,
3258 u16 gpio, u32 *gpio_val)
3260 enum _ecore_status_t rc = ECORE_SUCCESS;
3261 u32 drv_mb_param = 0, rsp;
3263 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3265 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3266 drv_mb_param, &rsp, gpio_val);
3268 if (rc != ECORE_SUCCESS)
3271 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3272 return ECORE_UNKNOWN_ERROR;
3274 return ECORE_SUCCESS;
3277 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3278 struct ecore_ptt *p_ptt,
3279 u16 gpio, u16 gpio_val)
3281 enum _ecore_status_t rc = ECORE_SUCCESS;
3282 u32 drv_mb_param = 0, param, rsp;
3284 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3285 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3287 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3288 drv_mb_param, &rsp, &param);
3290 if (rc != ECORE_SUCCESS)
3293 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3294 return ECORE_UNKNOWN_ERROR;
3296 return ECORE_SUCCESS;
3299 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3300 struct ecore_ptt *p_ptt,
3301 u16 gpio, u32 *gpio_direction,
3304 u32 drv_mb_param = 0, rsp, val = 0;
3305 enum _ecore_status_t rc = ECORE_SUCCESS;
3307 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3309 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3310 drv_mb_param, &rsp, &val);
3311 if (rc != ECORE_SUCCESS)
3314 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3315 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3316 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3317 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3319 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3320 return ECORE_UNKNOWN_ERROR;
3322 return ECORE_SUCCESS;
3325 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3326 struct ecore_ptt *p_ptt)
3328 u32 drv_mb_param = 0, rsp, param;
3329 enum _ecore_status_t rc = ECORE_SUCCESS;
3331 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3332 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3334 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3335 drv_mb_param, &rsp, &param);
3337 if (rc != ECORE_SUCCESS)
3340 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3341 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3342 rc = ECORE_UNKNOWN_ERROR;
3347 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3348 struct ecore_ptt *p_ptt)
3350 u32 drv_mb_param, rsp, param;
3351 enum _ecore_status_t rc = ECORE_SUCCESS;
3353 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3354 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3356 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3357 drv_mb_param, &rsp, &param);
3359 if (rc != ECORE_SUCCESS)
3362 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3363 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3364 rc = ECORE_UNKNOWN_ERROR;
3369 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3370 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3372 u32 drv_mb_param = 0, rsp;
3373 enum _ecore_status_t rc = ECORE_SUCCESS;
3375 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3376 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3378 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3379 drv_mb_param, &rsp, num_images);
3381 if (rc != ECORE_SUCCESS)
3384 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3385 rc = ECORE_UNKNOWN_ERROR;
3390 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3391 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3392 struct bist_nvm_image_att *p_image_att, u32 image_index)
3394 u32 buf_size, nvm_offset, resp, param;
3395 enum _ecore_status_t rc;
3397 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3398 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3399 nvm_offset |= (image_index <<
3400 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3401 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3402 nvm_offset, &resp, &param, &buf_size,
3403 (u32 *)p_image_att);
3404 if (rc != ECORE_SUCCESS)
3407 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3408 (p_image_att->return_code != 1))
3409 rc = ECORE_UNKNOWN_ERROR;
3414 enum _ecore_status_t
3415 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3416 enum ecore_nvm_images image_id,
3417 struct ecore_nvm_image_att *p_image_att)
3419 struct bist_nvm_image_att mfw_image_att;
3420 enum nvm_image_type type;
3422 enum _ecore_status_t rc;
3424 /* Translate image_id into MFW definitions */
3426 case ECORE_NVM_IMAGE_ISCSI_CFG:
3427 type = NVM_TYPE_ISCSI_CFG;
3429 case ECORE_NVM_IMAGE_FCOE_CFG:
3430 type = NVM_TYPE_FCOE_CFG;
3432 case ECORE_NVM_IMAGE_MDUMP:
3433 type = NVM_TYPE_MDUMP;
3436 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3441 /* Learn number of images, then traverse and see if one fits */
3442 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3443 if (rc != ECORE_SUCCESS || !num_images)
3446 for (i = 0; i < num_images; i++) {
3447 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3449 if (rc != ECORE_SUCCESS)
3452 if (type == mfw_image_att.image_type)
3455 if (i == num_images) {
3456 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3457 "Failed to find nvram image of type %08x\n",
3462 p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3463 p_image_att->length = mfw_image_att.len;
3465 return ECORE_SUCCESS;
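/* Retrieve a whole NVM image (e.g. ECORE_NVM_IMAGE_ISCSI_CFG) into a
 * caller-supplied buffer. A rough usage sketch, with an illustrative
 * buffer size:
 *
 *	u8 cfg[4096];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_get_nvm_image(p_hwfn, p_ptt, ECORE_NVM_IMAGE_ISCSI_CFG,
 *				     cfg, sizeof(cfg));
 *	if (rc != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "Failed to read the iSCSI config image\n");
 */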
3468 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3469 struct ecore_ptt *p_ptt,
3470 enum ecore_nvm_images image_id,
3471 u8 *p_buffer, u32 buffer_len)
3473 struct ecore_nvm_image_att image_att;
3474 enum _ecore_status_t rc;
3476 OSAL_MEM_ZERO(p_buffer, buffer_len);
3478 rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3479 if (rc != ECORE_SUCCESS)
3482 /* Validate sizes - both the image's and the supplied buffer's */
3483 if (image_att.length <= 4) {
3484 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3485 "Image [%d] is too small - only %d bytes\n",
3486 image_id, image_att.length);
3490 /* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
3491 image_att.length -= 4;
3493 if (image_att.length > buffer_len) {
3494 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3495 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3496 image_id, image_att.length, buffer_len);
3500 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3501 p_buffer, image_att.length);
3504 enum _ecore_status_t
3505 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3506 struct ecore_ptt *p_ptt,
3507 struct ecore_temperature_info *p_temp_info)
3509 struct ecore_temperature_sensor *p_temp_sensor;
3510 struct temperature_status_stc mfw_temp_info;
3511 struct ecore_mcp_mb_params mb_params;
3513 enum _ecore_status_t rc;
3516 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3517 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3518 mb_params.p_data_dst = &mfw_temp_info;
3519 mb_params.data_dst_size = sizeof(mfw_temp_info);
3520 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3521 if (rc != ECORE_SUCCESS)
3524 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3525 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3526 ECORE_MAX_NUM_OF_SENSORS);
3527 for (i = 0; i < p_temp_info->num_sensors; i++) {
3528 val = mfw_temp_info.sensor[i];
3529 p_temp_sensor = &p_temp_info->sensors[i];
3530 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3531 SENSOR_LOCATION_OFFSET;
3532 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3533 THRESHOLD_HIGH_OFFSET;
3534 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3535 CRITICAL_TEMPERATURE_OFFSET;
3536 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3537 CURRENT_TEMP_OFFSET;
3540 return ECORE_SUCCESS;
3543 enum _ecore_status_t ecore_mcp_get_mba_versions(
3544 struct ecore_hwfn *p_hwfn,
3545 struct ecore_ptt *p_ptt,
3546 struct ecore_mba_vers *p_mba_vers)
3548 u32 buf_size, resp, param;
3549 enum _ecore_status_t rc;
3551 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3552 0, &resp, &param, &buf_size,
3553 &(p_mba_vers->mba_vers[0]));
3555 if (rc != ECORE_SUCCESS)
3558 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3559 rc = ECORE_UNKNOWN_ERROR;
3561 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3562 rc = ECORE_UNKNOWN_ERROR;
3567 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3568 struct ecore_ptt *p_ptt,
3573 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3574 0, &rsp, (u32 *)num_events);
3577 static enum resource_id_enum
3578 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3580 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3584 mfw_res_id = RESOURCE_NUM_SB_E;
3586 case ECORE_L2_QUEUE:
3587 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3590 mfw_res_id = RESOURCE_NUM_VPORT_E;
3593 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3596 mfw_res_id = RESOURCE_NUM_PQ_E;
3599 mfw_res_id = RESOURCE_NUM_RL_E;
3603 /* Each VFC resource can accommodate both a MAC and a VLAN */
3604 mfw_res_id = RESOURCE_VFC_FILTER_E;
3607 mfw_res_id = RESOURCE_ILT_E;
3609 case ECORE_LL2_QUEUE:
3610 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3612 case ECORE_RDMA_CNQ_RAM:
3613 case ECORE_CMDQS_CQS:
3614 /* CNQ/CMDQS are the same resource */
3615 mfw_res_id = RESOURCE_CQS_E;
3617 case ECORE_RDMA_STATS_QUEUE:
3618 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3621 mfw_res_id = RESOURCE_BDQ_E;
3630 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3631 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3632 #define ECORE_RESC_ALLOC_VERSION \
3633 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3634 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3635 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3636 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3638 struct ecore_resc_alloc_in_params {
3640 enum ecore_resources res_id;
3644 struct ecore_resc_alloc_out_params {
3654 static enum _ecore_status_t
3655 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3656 struct ecore_ptt *p_ptt,
3657 struct ecore_resc_alloc_in_params *p_in_params,
3658 struct ecore_resc_alloc_out_params *p_out_params)
3660 struct ecore_mcp_mb_params mb_params;
3661 struct resource_info mfw_resc_info;
3662 enum _ecore_status_t rc;
3664 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3666 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3667 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3669 "Failed to match resource %d [%s] with the MFW resources\n",
3670 p_in_params->res_id,
3671 ecore_hw_get_resc_name(p_in_params->res_id));
3675 switch (p_in_params->cmd) {
3676 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3677 mfw_resc_info.size = p_in_params->resc_max_val;
3679 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3682 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3687 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3688 mb_params.cmd = p_in_params->cmd;
3689 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3690 mb_params.p_data_src = &mfw_resc_info;
3691 mb_params.data_src_size = sizeof(mfw_resc_info);
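/* The MFW fills its response into the same resource_info structure that was
 * sent, so the source buffer doubles as the destination buffer.
 */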
3692 mb_params.p_data_dst = mb_params.p_data_src;
3693 mb_params.data_dst_size = mb_params.data_src_size;
3695 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3696 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3697 p_in_params->cmd, p_in_params->res_id,
3698 ecore_hw_get_resc_name(p_in_params->res_id),
3699 GET_MFW_FIELD(mb_params.param,
3700 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3701 GET_MFW_FIELD(mb_params.param,
3702 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3703 p_in_params->resc_max_val);
3705 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3706 if (rc != ECORE_SUCCESS)
3709 p_out_params->mcp_resp = mb_params.mcp_resp;
3710 p_out_params->mcp_param = mb_params.mcp_param;
3711 p_out_params->resc_num = mfw_resc_info.size;
3712 p_out_params->resc_start = mfw_resc_info.offset;
3713 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3714 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3715 p_out_params->flags = mfw_resc_info.flags;
3717 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3718 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3719 GET_MFW_FIELD(p_out_params->mcp_param,
3720 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3721 GET_MFW_FIELD(p_out_params->mcp_param,
3722 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3723 p_out_params->resc_num, p_out_params->resc_start,
3724 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3725 p_out_params->flags);
3727 return ECORE_SUCCESS;
3730 enum _ecore_status_t
3731 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3732 enum ecore_resources res_id, u32 resc_max_val,
3735 struct ecore_resc_alloc_out_params out_params;
3736 struct ecore_resc_alloc_in_params in_params;
3737 enum _ecore_status_t rc;
3739 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3740 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3741 in_params.res_id = res_id;
3742 in_params.resc_max_val = resc_max_val;
3743 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3744 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3746 if (rc != ECORE_SUCCESS)
3749 *p_mcp_resp = out_params.mcp_resp;
3751 return ECORE_SUCCESS;
3754 enum _ecore_status_t
3755 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3756 enum ecore_resources res_id, u32 *p_mcp_resp,
3757 u32 *p_resc_num, u32 *p_resc_start)
3759 struct ecore_resc_alloc_out_params out_params;
3760 struct ecore_resc_alloc_in_params in_params;
3761 enum _ecore_status_t rc;
3763 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3764 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3765 in_params.res_id = res_id;
3766 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3767 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3769 if (rc != ECORE_SUCCESS)
3772 *p_mcp_resp = out_params.mcp_resp;
3774 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3775 *p_resc_num = out_params.resc_num;
3776 *p_resc_start = out_params.resc_start;
3779 return ECORE_SUCCESS;
3782 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3783 struct ecore_ptt *p_ptt)
3785 u32 mcp_resp, mcp_param;
3787 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3788 &mcp_resp, &mcp_param);
3791 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3792 struct ecore_ptt *p_ptt,
3793 u8 lldp_mac_addr[ETH_ALEN])
3795 struct ecore_mcp_mb_params mb_params;
3796 struct mcp_mac lldp_mac;
3797 enum _ecore_status_t rc;
3799 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3800 mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3801 mb_params.p_data_dst = &lldp_mac;
3802 mb_params.data_dst_size = sizeof(lldp_mac);
3803 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3804 if (rc != ECORE_SUCCESS)
3807 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3808 DP_NOTICE(p_hwfn, false,
3809 "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3810 mb_params.mcp_resp);
3814 *(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3815 *(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3817 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3818 "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3819 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3820 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3822 return ECORE_SUCCESS;
3825 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3826 struct ecore_ptt *p_ptt,
3827 u8 lldp_mac_addr[ETH_ALEN])
3829 struct ecore_mcp_mb_params mb_params;
3830 struct mcp_mac lldp_mac;
3831 enum _ecore_status_t rc;
3833 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3834 "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3835 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3836 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3838 OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3839 lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3840 lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3842 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3843 mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3844 mb_params.p_data_src = &lldp_mac;
3845 mb_params.data_src_size = sizeof(lldp_mac);
3846 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3847 if (rc != ECORE_SUCCESS)
3850 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3851 DP_NOTICE(p_hwfn, false,
3852 "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3853 mb_params.mcp_resp);
3857 return ECORE_SUCCESS;
3860 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3861 struct ecore_ptt *p_ptt,
3862 u32 param, u32 *p_mcp_resp,
3865 enum _ecore_status_t rc;
3867 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3868 p_mcp_resp, p_mcp_param);
3869 if (rc != ECORE_SUCCESS)
3872 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3874 "The resource command is unsupported by the MFW\n");
3875 return ECORE_NOTIMPL;
3878 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3879 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3881 DP_NOTICE(p_hwfn, false,
3882 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3890 static enum _ecore_status_t
3891 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3892 struct ecore_resc_lock_params *p_params)
3894 u32 param = 0, mcp_resp, mcp_param;
3896 enum _ecore_status_t rc;
3898 switch (p_params->timeout) {
3899 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3900 opcode = RESOURCE_OPCODE_REQ;
3901 p_params->timeout = 0;
3903 case ECORE_MCP_RESC_LOCK_TO_NONE:
3904 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3905 p_params->timeout = 0;
3908 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3912 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3913 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3914 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3916 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3917 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3918 param, p_params->timeout, opcode, p_params->resource);
3920 /* Attempt to acquire the resource */
3921 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3923 if (rc != ECORE_SUCCESS)
3926 /* Analyze the response */
3927 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3928 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3930 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3931 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3932 mcp_param, opcode, p_params->owner);
3935 case RESOURCE_OPCODE_GNT:
3936 p_params->b_granted = true;
3938 case RESOURCE_OPCODE_BUSY:
3939 p_params->b_granted = false;
3942 DP_NOTICE(p_hwfn, false,
3943 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3948 return ECORE_SUCCESS;
3951 enum _ecore_status_t
3952 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3953 struct ecore_resc_lock_params *p_params)
3956 enum _ecore_status_t rc;
3959 /* No need for an interval before the first iteration */
3961 if (p_params->sleep_b4_retry) {
3962 u16 retry_interval_in_ms =
3963 DIV_ROUND_UP(p_params->retry_interval,
3966 OSAL_MSLEEP(retry_interval_in_ms);
3968 OSAL_UDELAY(p_params->retry_interval);
3972 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3973 if (rc != ECORE_SUCCESS)
3976 if (p_params->b_granted)
3978 } while (retry_cnt++ < p_params->retry_num);
3980 return ECORE_SUCCESS;
3983 enum _ecore_status_t
3984 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3985 struct ecore_resc_unlock_params *p_params)
3987 u32 param = 0, mcp_resp, mcp_param;
3989 enum _ecore_status_t rc;
3991 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3992 : RESOURCE_OPCODE_RELEASE;
3993 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3994 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3996 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3997 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3998 param, opcode, p_params->resource);
4000 /* Attempt to release the resource */
4001 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4003 if (rc != ECORE_SUCCESS)
4006 /* Analyze the response */
4007 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4009 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4010 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4014 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4016 "Resource unlock request for an already released resource [%d]\n",
4017 p_params->resource);
4019 case RESOURCE_OPCODE_RELEASED:
4020 p_params->b_released = true;
4022 case RESOURCE_OPCODE_WRONG_OWNER:
4023 p_params->b_released = false;
4026 DP_NOTICE(p_hwfn, false,
4027 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4032 return ECORE_SUCCESS;
4035 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
4036 struct ecore_resc_unlock_params *p_unlock,
4037 enum ecore_resc_lock resource,
4038 bool b_is_permanent)
4040 if (p_lock != OSAL_NULL) {
4041 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
4043 /* Permanent resources don't require aging, and there's no
4044 * point in trying to acquire them more than once, since another
4045 * entity is not expected to release them.
4047 if (b_is_permanent) {
4048 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
4050 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
4051 p_lock->retry_interval =
4052 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
4053 p_lock->sleep_b4_retry = true;
4056 p_lock->resource = resource;
4059 if (p_unlock != OSAL_NULL) {
4060 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
4061 p_unlock->resource = resource;
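/* A rough usage sketch of the resource lock API (resource_id stands for a
 * value from enum ecore_resc_lock chosen by the caller; error handling is
 * omitted):
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *	struct ecore_resc_lock_params lock_params;
 *
 *	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *					 resource_id, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) == ECORE_SUCCESS &&
 *	    lock_params.b_granted) {
 *		// ... use the shared resource ...
 *		ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */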
4065 enum _ecore_status_t
4066 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4069 u32 resp = 0, param = 0;
4070 enum _ecore_status_t rc;
4072 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4073 (u32)vlan << DRV_MB_PARAM_FCOE_CVID_OFFSET,
4075 if (rc != ECORE_SUCCESS)
4076 DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);
4081 enum _ecore_status_t
4082 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4083 struct ecore_ptt *p_ptt, u8 *wwn)
4085 struct ecore_mcp_mb_params mb_params;
4086 struct mcp_wwn fabric_name;
4087 enum _ecore_status_t rc;
4089 OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4090 fabric_name.wwn_upper = *(u32 *)wwn;
4091 fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4093 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4094 mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4095 mb_params.p_data_src = &fabric_name;
4096 mb_params.data_src_size = sizeof(fabric_name);
4097 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4098 if (rc != ECORE_SUCCESS)
4099 DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);
4104 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4105 u32 offset, u32 val)
4107 struct ecore_mcp_mb_params mb_params = {0};
4108 enum _ecore_status_t rc = ECORE_SUCCESS;
4111 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4112 mb_params.param = offset;
4113 mb_params.p_data_src = &dword;
4114 mb_params.data_src_size = sizeof(dword);
4116 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4117 if (rc != ECORE_SUCCESS) {
4118 DP_NOTICE(p_hwfn, false,
4119 "Failed to wol write request, rc = %d\n", rc);
4122 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4123 DP_NOTICE(p_hwfn, false,
4124 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4125 val, offset, mb_params.mcp_resp);
4126 rc = ECORE_UNKNOWN_ERROR;
4130 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4131 struct ecore_ptt *p_ptt)
4134 enum _ecore_status_t rc;
4136 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4137 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4138 if (rc == ECORE_SUCCESS)
4139 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4140 "MFW supported features: %08x\n",
4141 p_hwfn->mcp_info->capabilities);
4146 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4147 struct ecore_ptt *p_ptt)
4149 u32 mcp_resp, mcp_param, features;
4151 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4152 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4154 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4155 features, &mcp_resp, &mcp_param);