/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_ops.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500
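
/* Point the device at the IRO (internal RAM offsets) array compiled in from
 * ecore_iro_values.h.
 */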
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}
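
/* Store a single runtime register value; offsets beyond RUNTIME_ARRAY_SIZE
 * are rejected with an error print.
 */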
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}
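
/* Store an aggregated segment of runtime values starting at rt_offset,
 * marking every stored entry as valid.
 */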
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}
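
/* Flush the valid runtime entries of a range to the chip - contiguous valid
 * segments go through DMAE when wide-bus access is required, otherwise the
 * entries are written one register at a time.
 */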
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}
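
/* Allocate the runtime-array shadow buffers (b_valid and init_val);
 * VFs don't use the runtime array and skip the allocation.
 */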
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}
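
/* Release the runtime-array shadow buffers allocated by ecore_init_alloc() */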
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}
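
/* Write a host buffer to consecutive GRC registers - short or non-wide-bus
 * sections are written register by register, larger ones go through DMAE.
 */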
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr, u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}
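
/* Zero a GRC region via DMAE by replicating a single zeroed source buffer */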
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}
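
/* Fill a GRC region with a given 32-bit value using direct register writes */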
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}
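
/* init_ops array command - handles zipped, pattern and standard arrays */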
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}
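
/* Comparison helpers for the read/poll commands below */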
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}
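
/* Recursively evaluate the modes tree buffer against the active mode bitmap */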
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}
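
/* init_ops if_mode command - returns 0 if the current modes match, otherwise
 * the number of init commands to skip.
 */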
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}
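
/* init_ops if_phase command - returns the number of init commands to skip
 * when the phase or phase-id doesn't match, 0 otherwise.
 */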
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}
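
/* init_ops main entry point - walks the init-ops array and executes the
 * commands that apply to the given phase and modes.
 */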
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;
		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;
		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif

	return rc;
}
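
/* Program the GTT global windows; on slow (FPGA/EMUL) chips the PTT/GTT
 * block is initialized and polled for completion here first.
 */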
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}
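
/* Parse the firmware buffer (or the statically compiled init values) and
 * populate the fw_data pointers used by the init tool.
 */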
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED *fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}