/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_spq.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif
/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
#define SPQ_BLOCK_SLEEP_MS		(5)

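/*
 * Rough polling budgets implied by the constants above (illustrative
 * arithmetic, ignoring scheduling and register-access overhead): the
 * busy-wait pass spins up to 10 iterations x 10us = ~100us, while the
 * sleeping pass waits up to 200 iterations x 5ms = ~1s before the caller
 * escalates (see ecore_spq_block() below).
 */
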
#ifndef REMOVE_DBG
/***************************************************************************
 * Debug [iSCSI] tool
 ***************************************************************************/
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
				struct event_ring_entry *p_eqe)
{
	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
			  p_eqe->opcode);
		return;
	}

	switch (p_eqe->opcode) {
	case ISCSI_EVENT_TYPE_INIT_FUNC:
	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
		/* NOPE */
		break;
	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
			   p_hwfn->port_id, p_eqe->opcode,
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.icid),
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
			   p_eqe->data.iscsi_info.error_code);
		break;
	case ISCSI_EVENT_TYPE_UPDATE_CONN:
	case ISCSI_EVENT_TYPE_CLEAR_SQ:
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
	default:
		/* NOPE */
		break;
	}
}
#endif

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
				  union event_ring_data OSAL_UNUSED *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

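/*
 * Note: the write barrier in ecore_spq_blocking_cb() pairs with the
 * OSAL_SMP_RMB() in the poll loop of __ecore_spq_block(), so the poller
 * never observes done == 1 before fw_return_code has been stored.
 */
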
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
		iter_cnt *= 5;

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter) {
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		} else {
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
		}
	}

	return ECORE_TIMEOUT;
}

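/*
 * Escalation ladder for EBLOCK/BLOCK ramrods (sketch of the flow coded
 * below): a short busy-wait poll first, then a sleeping poll, then an MCP
 * drain request followed by one more sleeping poll. Only when all of these
 * fail is the ramrod declared stuck and ECORE_HW_ERR_RAMROD_FAIL reported.
 */
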
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
						 struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct ecore_cxt_info cxt_info;
	u16 physical_q;
	enum _ecore_status_t rc;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	} else { /* E5 */
		ECORE_E5_MISSING_CODE;
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

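/*
 * Posting sketch: each ramrod is stamped with an "echo" equal to the chain
 * producer index at post time. The firmware returns that echo verbatim in
 * the matching EQ element, which is how ecore_spq_completion() pairs an EQE
 * with the entry waiting on the completion_pending list.
 */
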
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem; /* Struct assignment */

	p_db_data->spq_prod =
		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure doorbell was rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	ecore_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
			    enum protocol_type protocol_id,
			    ecore_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return ECORE_SUCCESS;
}

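/*
 * Usage sketch (illustrative only; the callback name below is hypothetical):
 * a protocol module registers once during init and unregisters on teardown,
 * e.g.
 *
 *	rc = ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *					 my_iscsi_async_cb);
 *	...
 *	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 */
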
void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
			      enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
			  u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

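	/*
	 * Illustrative example (numbers are hypothetical): in PBL mode the
	 * tail of each page is reserved for chain linkage. If a page held
	 * 255 usable + 1 unusable elements, a FW consumer of 255 would be
	 * bumped past the reserved slot to 256, keeping the comparison with
	 * our local consumer below aligned with real elements.
	 */
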
	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,		/* Event Opcode */
			   p_eqe->protocol_id,		/* Event Protocol ID */
			   p_eqe->reserved0,		/* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo), /* Echo value from
							     ramrod data on the host
							   */
			   p_eqe->fw_return_code,	/* FW return code for SP
							   ramrods
							 */
			   p_eqe->flags);

#ifndef REMOVE_DBG
		if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
			ecore_iscsi_eq_dump(p_hwfn, p_eqe);
#endif

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain*/
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct core_db_data *p_db_data;
	void OSAL_IOMEM *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	enum _ecore_status_t rc;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

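	/*
	 * Layout note (sketch): p_virt/p_phys describe one coherent buffer
	 * holding all SPQ entries, and each entry's elem.data_ptr is pointed
	 * at the ramrod union embedded in that same entry, so no per-ramrod
	 * DMA allocation is needed later.
	 */
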
	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

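	/*
	 * Doorbell semantics (informal reading of the fields above): the
	 * doorbell is routed to the XCM block, the aggregated value is the
	 * SPQ producer carried in db_data->spq_prod, and DB_AGG_CMD_MAX lets
	 * hardware coalesce back-to-back rings by keeping the maximum
	 * producer value.
	 */
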
	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt) {
		goto spq_allocate_fail;
	}

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
		goto spq_allocate_fail;
#endif

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	void OSAL_IOMEM *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_spq);
	p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
					 struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry,
					      list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
						struct ecore_spq_entry *p_ent,
						enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq) {
		return 0xffffffff;	/* illegal */
	}
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						       &p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry,
					      list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

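/*
 * Typical caller flow (illustrative sketch only; real ramrod setup lives
 * in the ecore_sp_* helpers, not in this file):
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	// ... fill p_ent->elem.hdr, ramrod data and comp_mode ...
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */
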
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows to be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list,
				      struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
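			/*
			 * Worked example (hypothetical echo values): if echo 6
			 * completes before echo 5, only bit 6 is set and the
			 * consumer stays at 5. When echo 5 later completes,
			 * bits 5 and 6 are both set, so the loop below clears
			 * them in order and advances the consumer by two.
			 */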
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
	p_hwfn->p_consq = OSAL_NULL;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif