/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#endif

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)

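/* Blocking waits are two-phased: a busy-wait of up to
 * SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US (~100us), followed if needed
 * by a sleeping poll of up to SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS
 * (~5 seconds); see __ecore_spq_block() below.
 */
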
/***************************************************************************
 * iSCSI EQE dump
 ***************************************************************************/
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
				struct event_ring_entry *p_eqe)
{
	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
			  p_eqe->opcode);
		return;
	}

	switch (p_eqe->opcode) {
	case ISCSI_EVENT_TYPE_INIT_FUNC:
	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
		/* NOPE */
		break;
	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
			   p_hwfn->port_id, p_eqe->opcode,
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,
			   OSAL_LE32_TO_CPU(p_eqe->data.iscsi_info.cid),
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
			   p_eqe->data.iscsi_info.error_code);
		break;
	case ISCSI_EVENT_TYPE_UPDATE_CONN:
	case ISCSI_EVENT_TYPE_CLEAR_SQ:
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
	default:
		break;
	}
}

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
				  void *cookie,
				  union event_ring_data *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

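/* Poll for the completion flag set by ecore_spq_blocking_cb(). Returns
 * ECORE_SUCCESS once the EQE arrives, or ECORE_TIMEOUT after the iteration
 * budget is exhausted.
 */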
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter) {
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		} else {
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
		}
	}

	return ECORE_TIMEOUT;
}

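/* Escalating wait for a ramrod: a quick busy-wait poll, then a sleeping
 * poll, and as a last resort an MCP drain request followed by one more
 * sleeping poll before declaring the ramrod stuck.
 */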
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}

err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
						 struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
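/* Initialize the CORE-connection context backing the SPQ: enable the DQ
 * completion flags in the XSTORM aggregative context, point the context at
 * the SPQ chain, and record the consolidation-queue (consq) base address.
 */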
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq *p_spq)
{
	struct ecore_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	enum _ecore_status_t rc;
	u16 physical_q;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	} else { /* E5 */
		ECORE_E5_MISSING_CODE;
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

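/* Post a single SPQ entry: copy it into the next chain element and ring the
 * XCM doorbell with the new producer value. The memory barriers order the
 * SPQE write against the doorbell write, and the doorbell against anything
 * that follows it.
 */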
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem;	/* struct assignment */

	/* send a doorbell on the slow hwfn session */
	OSAL_MEMSET(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

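/* Dispatch an EQE flagged as asynchronous to its protocol-specific handler;
 * anything unrecognized is reported and dropped.
 */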
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#ifdef CONFIG_ECORE_ROCE
	case PROTOCOLID_ROCE:
		ecore_roce_async_event(p_hwfn,
				       p_eqe->opcode,
				       &p_eqe->data.rdma_data);
		return ECORE_SUCCESS;
#endif
#ifdef CONFIG_ECORE_IWARP
	case PROTOCOLID_IWARP:
		ecore_iwarp_async_event(p_hwfn,
					p_eqe->opcode,
					&p_eqe->data.rdma_data.async_handle,
					p_eqe->fw_return_code);
		return ECORE_SUCCESS;
#endif
	case PROTOCOLID_COMMON:
		return ecore_sriov_eqe_event(p_hwfn,
					     p_eqe->opcode,
					     p_eqe->echo,
					     &p_eqe->data);
#ifdef CONFIG_ECORE_ISCSI
	case PROTOCOLID_ISCSI:
		if (p_hwfn->p_iscsi_info->event_cb != OSAL_NULL) {
			struct ecore_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

			return p_iscsi->event_cb(p_iscsi->event_context,
						 p_eqe->opcode, &p_eqe->data);
		}

		DP_NOTICE(p_hwfn,
			  false, "iSCSI async completion is not set\n");
		return ECORE_NOTIMPL;
#endif
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
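/* Write the given index to the USTORM EQE consumer offset for this PF,
 * letting FW know how far the host has processed the event ring.
 */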
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
					 void *cookie)
{
	struct ecore_eq *p_eq = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	    /* Event Opcode */
			   p_eqe->protocol_id,	    /* Event Protocol ID */
			   p_eqe->reserved0,	    /* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo), /* Echo value from
							     ramrod data on the
							     host */
			   p_eqe->fw_return_code,   /* FW return code for SP
						       ramrods */
			   p_eqe->flags);

		if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
			ecore_iscsi_eq_dump(p_hwfn, p_eqe);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data))
			rc = ECORE_INVAL;

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	return rc;
}

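/* The EQ is a PBL chain that FW produces into and the driver consumes;
 * ecore_eq_completion() above runs from the slowpath status-block callback
 * registered here.
 */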
enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
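/* Rebuild the runtime state of an already-allocated SPQ: every chain element
 * gets a pre-linked ramrod data buffer from the coherent block allocated in
 * ecore_spq_alloc(), and all entries start out in free_pool.
 */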
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);

	OSAL_FREE(p_hwfn->p_dev, p_spq);
	p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
					 struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, true, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry,
					      list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
						struct ecore_spq_entry *p_ent,
						enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;
		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						       &p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry,
					      list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

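/* Main entry point for posting a ramrod. The entry is completed (callback
 * hooked per its completion mode), queued on the pending list, and pushed to
 * HW if the chain has room; for EBLOCK mode the caller is then held in
 * ecore_spq_block() until the matching EQE arrives or the wait gives up.
 *
 * Typical caller flow (a sketch, using the helpers defined in this file):
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	// ...fill p_ent->elem and its ramrod data, set comp_mode/priority...
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
 */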
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows to be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc != ECORE_SUCCESS)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc != ECORE_SUCCESS)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer - perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

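/* Match an EQE against the completion_pending list by its echo value and
 * invoke the entry's completion callback. Out-of-order completions are
 * tracked in a bitmap so the chain consumer only advances over a contiguous
 * run of completed entries.
 */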
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn)
		return ECORE_INVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list,
				      struct ecore_spq_entry) {
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
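			/* e.g. if echoes 5 and 6 complete before 4, their bits
			 * are set but comp_bitmap_idx stays at 4; once 4
			 * completes, the loop below consumes 4, 5 and 6 in
			 * one pass.
			 */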
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (found == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE / 0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
	p_hwfn->p_consq = OSAL_NULL;
}