/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
#include "ecore_rdma.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_mcp.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_int.h"
#include "pcics_reg_driver.h"
#include "ecore_iro.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_tcp_ip.h"
#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28182)
#pragma warning(disable : 6011)
#endif
static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid);

static enum _ecore_status_t
ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
		       u8 fw_event_code,
		       u16 OSAL_UNUSED echo,
		       union event_ring_data *data,
		       u8 OSAL_UNUSED fw_return_code)
{
	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid = (u16)OSAL_LE32_TO_CPU(
			data->rdma_data.rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in ecore_roce_sp_destroy_qp.
		 */
		ecore_roce_free_icid(p_hwfn, icid);
	} else {
		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			fw_event_code,
			(void *)&data->rdma_data.async_handle);
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_roce_start_rl(
	struct ecore_hwfn *p_hwfn,
	struct ecore_roce_dcqcn_params *dcqcn_params)
{
	struct ecore_rl_update_params params;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
	OSAL_MEMSET(&params, 0, sizeof(params));

	params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
	params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
			    ecore_init_qm_get_num_pf_rls(p_hwfn);
	params.dcqcn_update_param_flg = 1;
	params.rl_init_flg = 1;
	params.rl_start_flg = 1;
	params.rl_stop_flg = 0;
	params.rl_dc_qcn_flg = 1;

	params.rl_bc_rate = dcqcn_params->rl_bc_rate;
	params.rl_max_rate = dcqcn_params->rl_max_rate;
	params.rl_r_ai = dcqcn_params->rl_r_ai;
	params.rl_r_hai = dcqcn_params->rl_r_hai;
	params.dcqcn_gd = dcqcn_params->dcqcn_gd;
	params.dcqcn_k_us = dcqcn_params->dcqcn_k_us;
	params.dcqcn_timeuot_us = dcqcn_params->dcqcn_timeout_us;

	return ecore_sp_rl_update(p_hwfn, &params);
}
enum _ecore_status_t ecore_roce_stop_rl(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rl_update_params params;

	if (!p_hwfn->p_rdma_info->roce.dcqcn_reaction_point)
		return ECORE_SUCCESS;

	OSAL_MEMSET(&params, 0, sizeof(params));
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");

	params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
	params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
			    ecore_init_qm_get_num_pf_rls(p_hwfn);
	params.rl_stop_flg = 1;

	return ecore_sp_rl_update(p_hwfn, &params);
}
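
/* Illustrative only: a sketch of how a caller might fill
 * struct ecore_roce_dcqcn_params before handing it to ecore_roce_dcqcn_cfg()
 * below. All numeric values are hypothetical placeholders, not recommended
 * settings:
 *
 *	struct ecore_roce_dcqcn_params dp;
 *
 *	OSAL_MEMSET(&dp, 0, sizeof(dp));
 *	dp.notification_point = 1;	// echo congestion via CNP packets
 *	dp.reaction_point = 1;		// rate-limit flows that receive CNPs
 *	dp.cnp_send_timeout = 0x10;	// minimum spacing between CNPs
 *	dp.cnp_dscp = 48;		// DSCP used for generated CNPs
 *	dp.cnp_vlan_priority = 6;
 *	dp.rl_r_ai = 5;			// additive-increase step
 *	dp.rl_r_hai = 50;		// hyper additive-increase step
 *	dp.dcqcn_gd = 1024;		// alpha gain divisor
 *	dp.dcqcn_k_us = 55;		// alpha update interval
 *	dp.dcqcn_timeout_us = 1000;	// rate update timeout
 */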
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH 2
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN 1

enum _ecore_status_t ecore_roce_dcqcn_cfg(
	struct ecore_hwfn *p_hwfn,
	struct ecore_roce_dcqcn_params *params,
	struct roce_init_func_ramrod_data *p_ramrod,
	struct ecore_ptt *p_ptt)
{
	u32 val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_hwfn->pf_params.rdma_pf_params.enable_dcqcn ||
	    p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
		return rc;

	p_hwfn->p_rdma_info->roce.dcqcn_enabled = 0;
	if (params->notification_point) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Configuring dcqcn notification point: timeout = 0x%x\n",
			   params->cnp_send_timeout);
		p_ramrod->roce.cnp_send_timeout = params->cnp_send_timeout;
		p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
		/* Configure NIG to duplicate to host and storm when:
		 * - ECN == 2'b11 (notification point)
		 */
		val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN;
	}

	if (params->reaction_point) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Configuring dcqcn reaction point\n");
		p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
		p_hwfn->p_rdma_info->roce.dcqcn_reaction_point = 1;
		/* Configure NIG to duplicate to host and storm when:
		 * - BTH opcode equals bth_hdr_flow_ctrl_opcode_2
		 */
		val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH;

		rc = ecore_roce_start_rl(p_hwfn, params);
	}

	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->roce.cnp_dscp = params->cnp_dscp;
	p_ramrod->roce.cnp_vlan_priority = params->cnp_vlan_priority;

	ecore_wr(p_hwfn,
		 p_ptt,
		 NIG_REG_ROCE_DUPLICATE_TO_HOST,
		 val);

	return rc;
}
enum _ecore_status_t ecore_roce_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_bmap *cid_map = &p_hwfn->p_rdma_info->cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the
	 * user after the synchronous part. The asynchronous part may
	 * take a little longer. We delay for a short while if an
	 * async destroy QP is still expected. Beyond the added delay
	 * we clear the bitmap anyway.
	 */
	while (OSAL_BITMAP_WEIGHT(cid_map->bitmap, cid_map->max_count)) {
		OSAL_MSLEEP(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, false,
				  "cid bitmap wait timed out\n");
			break;
		}
	}

	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);

	return ECORE_SUCCESS;
}
static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid,
				 __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		OSAL_MEMSET(src_gid, 0, sizeof(union ecore_gid));
		OSAL_MEMSET(dst_gid, 0, sizeof(union ecore_gid));
		src_gid[3] = OSAL_CPU_TO_LE32(qp->sgid.ipv4_addr);
		dst_gid[3] = OSAL_CPU_TO_LE32(qp->dgid.ipv4_addr);
	} else {
		/* RoCE, and RoCE v2 - IPv6: GIDs and IPv6 addresses coincide
		 * in location and size.
		 */
		for (i = 0; i < OSAL_ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = OSAL_CPU_TO_LE32(qp->sgid.dwords[i]);
			dst_gid[i] = OSAL_CPU_TO_LE32(qp->dgid.dwords[i]);
		}
	}
}
static enum roce_flavor ecore_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = (enum roce_flavor)ROCE_V2_IPV6;
		break;
	default:
		flavor = (enum roce_flavor)MAX_ROCE_MODE;
		break;
	}

	return flavor;
}
static void ecore_roce_free_cid_pair(struct ecore_hwfn *p_hwfn, u16 cid)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
static void ecore_roce_free_qp(struct ecore_hwfn *p_hwfn, u16 qp_idx)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, qp_idx);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
#define ECORE_ROCE_CREATE_QP_ATTEMPTS	(20)
#define ECORE_ROCE_CREATE_QP_MSLEEP	(10)

static enum _ecore_status_t ecore_roce_wait_free_cids(struct ecore_hwfn *p_hwfn, u32 qp_idx)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	bool cids_free = false;
	u32 icid, iter = 0;
	int req, resp;

	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);

	/* Make sure that the cids that were used by the QP index are free.
	 * This is necessary because the destroy flow returns to the user before
	 * the device finishes clean up.
	 * It can happen in the following flows:
	 * (1) ib_destroy_qp followed by an ib_create_qp
	 * (2) ib_modify_qp to RESET followed (not immediately), by an
	 *     ib_modify_qp to RTR
	 */
	do {
		OSAL_SPIN_LOCK(&p_rdma_info->lock);
		resp = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid);
		req = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid + 1);
		if (!resp && !req)
			cids_free = true;

		OSAL_SPIN_UNLOCK(&p_rdma_info->lock);

		if (!cids_free) {
			OSAL_MSLEEP(ECORE_ROCE_CREATE_QP_MSLEEP);
			iter++;
		}
	} while (!cids_free && iter < ECORE_ROCE_CREATE_QP_ATTEMPTS);

	if (!cids_free) {
		DP_ERR(p_hwfn->p_dev,
		       "responder and/or requester CIDs are still in use. resp=%d, req=%d\n",
		       resp, req);
		return ECORE_AGAIN;
	}

	return ECORE_SUCCESS;
}
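
/* For orientation: each RoCE QP owns two consecutive cids - the responder
 * uses the even icid and the requester uses icid + 1 - which is why the loop
 * above tests both bits. Assuming the conventional qed/ecore mapping (the
 * macros are defined in the RDMA headers, so this is a sketch, not a quote):
 *
 *	#define ECORE_ROCE_QP_TO_ICID(qp_idx)	((qp_idx) * 2)
 *	#define ECORE_ROCE_ICID_TO_QP(icid)	((icid) / 2)
 */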
enum _ecore_status_t ecore_roce_alloc_qp_idx(
	struct ecore_hwfn *p_hwfn, u16 *qp_idx16)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, icid, cid, qp_idx;
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->qp_map, &qp_idx);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "failed to allocate qp\n");
		OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
		return rc;
	}

	OSAL_SPIN_UNLOCK(&p_rdma_info->lock);

	/* Verify that the cid bits of this qp index are clear */
	rc = ecore_roce_wait_free_cids(p_hwfn, qp_idx);
	if (rc != ECORE_SUCCESS) {
		rc = ECORE_UNKNOWN_ERROR;
		goto err;
	}

	/* Allocate a DMA-able context for an ILT page, if not already
	 * existing, for the cids of this qp index.
	 * Note: If the second allocation fails there's no need to free the
	 * first as it will be used in the future.
	 */
	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);
	start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = start_cid + icid;

	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid + 1);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* qp index is under 2^16 */
	*qp_idx16 = (u16)qp_idx;

	return ECORE_SUCCESS;

err:
	ecore_roce_free_qp(p_hwfn, (u16)qp_idx);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
	return rc;
}
static void ecore_roce_set_cid(struct ecore_hwfn *p_hwfn,
			       u32 cid)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_set_id(p_hwfn,
			  &p_hwfn->p_rdma_info->cid_map,
			  cid);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
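
/* The ramrod-construction code below leans heavily on the SET_FIELD() /
 * GET_FIELD() helpers from the common headers. As a reminder of the idiom
 * (a sketch of the usual definitions; the authoritative ones live in the
 * shared HSI/OSAL headers):
 *
 *	#define SET_FIELD(value, name, flag) \
 *	do { \
 *		(value) &= ~((u64)name##_MASK << name##_SHIFT); \
 *		(value) |= (((u64)(flag)) & (u64)name##_MASK) << name##_SHIFT; \
 *	} while (0)
 *
 *	#define GET_FIELD(value, name) \
 *		(((value) >> name##_SHIFT) & name##_MASK)
 */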
static enum _ecore_status_t ecore_roce_sp_create_responder(
	struct ecore_hwfn    *p_hwfn,
	struct ecore_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct ecore_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u32 cid_start;
	u16 fw_srq_id;
	bool is_xrc;

	if (!qp->has_resp)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp_idx = %08x\n", qp->qp_idx);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					  &qp->irq_phys_addr,
					  RDMA_RING_PAGE_SIZE);
	if (!qp->irq) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
	p_ramrod->flags = 0;

	roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
		  roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG,
		  qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
		  ecore_rdma_is_xrc_qp(qp));

	/* TBD: future use only
	 * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK
	 * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT
	 */
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->rq_psn);
	p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
	p_ramrod->rq_num_pages = OSAL_CPU_TO_LE16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
	p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
					    qp->rq_cq_id);
	p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(qp->xrcd_id);
	/* when dcqcn is enabled, physical queues are determined according to
	 * the qp id.
	 */
	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
		regular_latency_queue =
			ecore_get_cm_pq_idx_rl(p_hwfn,
					       qp->qp_idx %
					       ROCE_DCQCN_RP_MAX_QPS);
	else
		regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);

	p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
	ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
	is_xrc = ecore_rdma_is_xrc_qp(qp);
	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, qp->srq_id, is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d regular physical queue = 0x%x, low latency physical queue 0x%x\n",
		   rc, regular_latency_queue, low_latency_queue);

	if (rc != ECORE_SUCCESS)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod.resp = 0;

	cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
	ecore_roce_set_cid(p_hwfn, qp->icid - cid_start);

	return rc;

err:
	DP_NOTICE(p_hwfn, false, "create responder - failed, rc = %d\n", rc);
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->irq,
			       qp->irq_phys_addr,
			       qp->irq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	return rc;
}
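
/* A note on DMA_REGPAIR_LE(), used above for the PBL addresses: it splits a
 * 64-bit DMA address into the little-endian hi/lo regpair the firmware
 * expects. A sketch of the usual ecore definition (an assumption here; see
 * the common headers for the authoritative form):
 *
 *	#define DMA_REGPAIR_LE(x, val) \
 *	do { \
 *		(x).hi = DMA_HI_LE((val)); \
 *		(x).lo = DMA_LO_LE((val)); \
 *	} while (0)
 */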
static enum _ecore_status_t ecore_roce_sp_create_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct ecore_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u32 cid_start;

	if (!qp->has_req)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					  &qp->orq_phys_addr,
					  RDMA_RING_PAGE_SIZE);
	if (!qp->orq) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_RAMROD_CREATE_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
	p_ramrod->flags = 0;

	roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
		  roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
		  qp->signal_all);

	/* TBD: future use only
	 * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK
	 * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT
	 */
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
		  ecore_rdma_is_xrc_qp(qp));

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
	p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->sq_psn);
	p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
	p_ramrod->sq_num_pages = OSAL_CPU_TO_LE16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
	p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
					    qp->sq_cq_id);

	/* when dcqcn is enabled, physical queues are determined according to
	 * the qp id.
	 */
	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
		regular_latency_queue =
			ecore_get_cm_pq_idx_rl(p_hwfn,
					       qp->qp_idx %
					       ROCE_DCQCN_RP_MAX_QPS);
	else
		regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);

	p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);

	ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	if (rc != ECORE_SUCCESS)
		goto err;

	qp->req_offloaded = true;
	qp->cq_prod.req = 0;

	cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
	ecore_roce_set_cid(p_hwfn, qp->icid + 1 - cid_start);

	return rc;

err:
	DP_NOTICE(p_hwfn, false, "create requester - failed, rc = %d\n", rc);
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->orq,
			       qp->orq_phys_addr,
			       qp->orq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	return rc;
}
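
/* All the create/modify/destroy/query flows in this file follow the same
 * slow-path ramrod pattern, summarized here for orientation (names exactly
 * as used above):
 *
 *	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *	init_data.cid = icid;		// responder: icid, requester: icid + 1
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;	// block on EQ completion
 *
 *	rc = ecore_sp_init_request(p_hwfn, &p_ent, <ramrod opcode>,
 *				   PROTOCOLID_ROCE, &init_data);
 *	// ... fill p_ent->ramrod.<flavor> ...
 *	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 */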
static enum _ecore_status_t ecore_roce_sp_modify_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	bool move_to_err,
	u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!qp->has_resp)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return ECORE_SUCCESS;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_EVENT_MODIFY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	/* TBD: future use only
	 * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK
	 * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT
	 */

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}
static enum _ecore_status_t ecore_roce_sp_modify_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	bool move_to_sqd,
	bool move_to_err,
	u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!qp->has_req)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return ECORE_SUCCESS;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_EVENT_MODIFY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
		  move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	/* TBD: future use only
	 * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK
	 * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT
	 */
	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}
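
/* Illustrative only: callers report which attributes actually changed via
 * the modify_flags bit-mask; the ramrod builders above forward each validity
 * bit with GET_FIELD() into the matching *_FLG ramrod field. A hypothetical
 * caller fragment (names of locals are assumptions):
 *
 *	u32 flags = 0;
 *
 *	SET_FIELD(flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY, 1);
 *	SET_FIELD(flags, ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
 *	in_params->modify_flags = flags;
 */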
static enum _ecore_status_t ecore_roce_sp_destroy_qp_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	u32 *num_invalidated_mw,
	u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum _ecore_status_t rc;

	*num_invalidated_mw = 0;

	if (!qp->has_resp)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->resp_offloaded) {
		*cq_prod = qp->cq_prod.resp;
		return ECORE_SUCCESS;
	}
	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_RAMROD_DESTROY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
					sizeof(*p_ramrod_res));
	if (!p_ramrod_res) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	*num_invalidated_mw =
		OSAL_LE32_TO_CPU(p_ramrod_res->num_invalidated_mw);
	*cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
	qp->cq_prod.resp = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->irq,
			       qp->irq_phys_addr,
			       qp->irq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

	/* "fall through" */

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	return rc;
}
static enum _ecore_status_t ecore_roce_sp_destroy_qp_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	u32 *num_bound_mw,
	u32 *cq_prod)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum _ecore_status_t rc;

	*num_bound_mw = 0;

	if (!qp->has_req)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded) {
		*cq_prod = qp->cq_prod.req;
		return ECORE_SUCCESS;
	}

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
					sizeof(*p_ramrod_res));
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn, false,
			  "ecore destroy requester failed: cannot allocate memory (ramrod)\n");
		return ECORE_NOMEM;
	}
	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	*num_bound_mw = OSAL_LE32_TO_CPU(p_ramrod_res->num_bound_mw);
	*cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
	qp->cq_prod.req = *cq_prod;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->orq,
			       qp->orq_phys_addr,
			       qp->orq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

	/* "fall through" */

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	return rc;
}
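
/* A note on the destroy/query flows: results come back through a small
 * DMA-coherent "output params" buffer. The pattern is: allocate the buffer
 * with OSAL_DMA_ALLOC_COHERENT(), point the ramrod at it via
 * DMA_REGPAIR_LE(output_params_addr, phys), post with ECORE_SPQ_MODE_EBLOCK
 * so the call blocks until the EQ completion, then read the firmware-written
 * fields with OSAL_LE32_TO_CPU() and free the buffer.
 */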
static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct ecore_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 error_flag;

	if (!qp->resp_offloaded) {
		/* Don't send query qp for the responder */
		out_params->rq_psn = qp->rq_psn;

		return ECORE_SUCCESS;
	}

	/* Send a query responder ramrod to the FW */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &resp_ramrod_res_phys,
					sizeof(*p_resp_ramrod_res));
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn, false,
			  "ecore query qp failed: cannot allocate memory (ramrod)\n");
		return ECORE_NOMEM;
	}
	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->rq_psn = OSAL_LE32_TO_CPU(p_resp_ramrod_res->psn);
	error_flag = GET_FIELD(
			OSAL_LE32_TO_CPU(p_resp_ramrod_res->err_flag),
			ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
	if (error_flag)
		qp->cur_state = ECORE_ROCE_QP_STATE_ERR;

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_resp_ramrod_res,
			       resp_ramrod_res_phys,
			       sizeof(*p_resp_ramrod_res));

	return rc;
}
static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params,
	bool *sq_draining)
{
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct ecore_sp_init_data init_data;
	dma_addr_t req_ramrod_res_phys;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 error_flag;

	if (!qp->req_offloaded) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		*sq_draining = 0;

		return ECORE_SUCCESS;
	}

	/* Send a query requester ramrod to the FW */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &req_ramrod_res_phys,
					sizeof(*p_req_ramrod_res));
	if (!p_req_ramrod_res) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore query qp failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}
	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	init_data.cid = qp->icid + 1;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->sq_psn = OSAL_LE32_TO_CPU(p_req_ramrod_res->psn);
	error_flag = GET_FIELD(OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
			       ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	if (error_flag)
		qp->cur_state = ECORE_ROCE_QP_STATE_ERR;

	*sq_draining = GET_FIELD(
			OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
			ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_req_ramrod_res,
			       req_ramrod_res_phys, sizeof(*p_req_ramrod_res));

	return rc;
}
enum _ecore_status_t ecore_roce_query_qp(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params)
{
	enum _ecore_status_t rc;

	rc = ecore_roce_sp_query_responder(p_hwfn, qp, out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_roce_sp_query_requester(p_hwfn, qp, out_params,
					   &out_params->draining);
	if (rc != ECORE_SUCCESS)
		return rc;

	out_params->state = qp->cur_state;

	return ECORE_SUCCESS;
}
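
/* Illustrative caller sketch for ecore_roce_query_qp() (the locals are
 * hypothetical, not part of this file):
 *
 *	struct ecore_rdma_query_qp_out_params out;
 *
 *	OSAL_MEMSET(&out, 0, sizeof(out));
 *	if (ecore_roce_query_qp(p_hwfn, qp, &out) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
 *			   "state=%d sq_psn=0x%x rq_psn=0x%x draining=%d\n",
 *			   out.state, out.sq_psn, out.rq_psn, out.draining);
 */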
enum _ecore_status_t ecore_roce_destroy_qp(struct ecore_hwfn *p_hwfn,
					   struct ecore_rdma_qp *qp,
					   struct ecore_rdma_destroy_qp_out_params *out_params)
{
	u32 cq_prod_resp = qp->cq_prod.resp, cq_prod_req = qp->cq_prod.req;
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	enum _ecore_status_t rc;

	/* Destroys the specified QP
	 * Note: if qp state != RESET/ERR/INIT then the upper driver first
	 * needs to call modify qp to move the qp to ERR state
	 */
	if ((qp->cur_state != ECORE_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != ECORE_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != ECORE_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn, true,
			  "QP must be in error, reset or init state before destroying it\n");
		return ECORE_INVAL;
	}

	if (qp->cur_state != ECORE_ROCE_QP_STATE_RESET) {
		rc = ecore_roce_sp_destroy_qp_responder(p_hwfn,
							qp,
							&num_invalidated_mw,
							&cq_prod_resp);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Send destroy requester ramrod */
		rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
							&num_bound_mw,
							&cq_prod_req);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* resp_offloaded was true, num_invalidated_mw is valid */
		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn, true,
				  "number of invalidated memory windows differs from the number of bound ones\n");
			return ECORE_INVAL;
		}
	}

	ecore_roce_free_qp(p_hwfn, qp->qp_idx);

	out_params->rq_cq_prod = cq_prod_resp;
	out_params->sq_cq_prod = cq_prod_req;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!rdma_cxt) {
		DP_ERR(p_hwfn->p_dev,
		       "destroy ud qp failed due to NULL rdma_cxt\n");
		return ECORE_INVAL;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_UD_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	ecore_roce_free_qp(p_hwfn, ECORE_ROCE_ICID_TO_QP(cid));

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "freed a ud qp with cid=%d\n", cid);

	return ECORE_SUCCESS;

err:
	DP_ERR(p_hwfn, "failed destroying a ud qp with cid=%d\n", cid);

	return rc;
}
enum _ecore_status_t ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u16 icid, qp_idx;

	if (!rdma_cxt || !out_params) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore roce create ud qp failed due to NULL entry (rdma_cxt=%p, out=%p)\n",
		       rdma_cxt, out_params);
		return ECORE_INVAL;
	}

	rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp_idx);
	if (rc != ECORE_SUCCESS)
		goto err;

	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_UD_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err1;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err1;

	out_params->icid = icid;
	out_params->qp_id = ((0xFF << 16) | icid);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "created a ud qp with icid=%d\n",
		   icid);

	return ECORE_SUCCESS;

err1:
	ecore_roce_free_qp(p_hwfn, qp_idx);

err:
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "failed creating a ud qp\n");

	return rc;
}
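
/* The UD (GSI) qp_id above is synthesized, not firmware-assigned: the upper
 * bits carry a reserved 0xFF prefix and the low 16 bits carry the icid, so
 * e.g. icid 0x10 yields qp_id 0xFF0010.
 */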
enum _ecore_status_t
ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
		     struct ecore_rdma_qp *qp,
		     enum ecore_roce_qp_state prev_state,
		     struct ecore_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == ECORE_ROCE_QP_STATE_INIT) ||
	     (prev_state == ECORE_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == ECORE_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */

		/* Verify that the cid bits of this qp index are clear */
		rc = ecore_roce_wait_free_cids(p_hwfn, qp->qp_idx);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = ecore_roce_sp_create_requester(p_hwfn, qp);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Send modify responder ramrod */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		return rc;
	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);
		return rc;
	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, true, false,
						    params->modify_flags);
		return rc;
	} else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);
		return rc;
	} else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);
		return rc;
	} else if (qp->cur_state == ECORE_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, true,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, true,
						    params->modify_flags);
		return rc;
	} else if (qp->cur_state == ECORE_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */

		/* Send destroy responder ramrod */
		rc = ecore_roce_sp_destroy_qp_responder(p_hwfn, qp,
							&num_invalidated_mw,
							&qp->cq_prod.resp);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
							&num_bound_mw,
							&qp->cq_prod.req);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn, true,
				  "number of invalidated memory windows differs from the number of bound ones\n");
			return ECORE_INVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
	}

	return rc;
}
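
/* Summary of the transitions handled above:
 *
 *	prev -> cur		action
 *	INIT/RESET -> RTR	create responder
 *	RTR -> RTS		create requester, then modify responder
 *	RTS -> RTS		modify responder, then modify requester
 *	RTS -> SQD		modify requester (move_to_sqd)
 *	SQD -> SQD		modify responder, then modify requester
 *	SQD -> RTS		modify responder, then modify requester
 *	any -> ERR		modify responder and requester (move_to_err)
 *	any -> RESET		destroy responder and requester ramrods
 */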
static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid;

	start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;

	OSAL_SPIN_LOCK(&p_rdma_info->lock);

	ecore_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);

	OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
}
static void ecore_rdma_dpm_conf(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (ECORE_MSG_DCB | ECORE_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
/* This function disables EDPM due to DCBx considerations */
void ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	bool val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = (ecore_rdma_allocated_qps(p_hwfn)) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	ecore_rdma_dpm_conf(p_hwfn, p_ptt);
}
/* This function disables EDPM due to doorbell bar considerations */
void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	ecore_rdma_dpm_conf(p_hwfn, p_ptt);
}

enum _ecore_status_t ecore_roce_setup(struct ecore_hwfn *p_hwfn)
{
	return ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					   ecore_roce_async_event);
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif