/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
#include "ecore_rdma.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_hw.h"
#include "ecore_mcp.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_int.h"
#include "pcics_reg_driver.h"
#include "ecore_iro.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_iwarp.h"
#include "ecore_ll2.h"
#include "ecore_ooo.h"
#include "ecore_hsi_common.h"
#include "ecore_tcp_ip.h"
enum _ecore_status_t ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
					   struct ecore_bmap *bmap,
					   u32 max_count,
					   char *name)
{
	u32 size_in_bytes;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	if (!max_count) {
		bmap->bitmap = OSAL_NULL;
		return ECORE_SUCCESS;
	}

	size_in_bytes = sizeof(unsigned long) *
		DIV_ROUND_UP(max_count, (sizeof(unsigned long) * 8));

	bmap->bitmap = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size_in_bytes);
	if (!bmap->bitmap) {
		DP_NOTICE(p_hwfn, false,
			  "ecore bmap alloc failed: cannot allocate memory (bitmap). rc = %d\n",
			  ECORE_NOMEM);
		return ECORE_NOMEM;
	}

	OSAL_SNPRINTF(bmap->name, QEDR_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
	return ECORE_SUCCESS;
}
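/* Worked example of the sizing above (illustrative, assuming a 64-bit
 * unsigned long): for max_count = 4096 ids, DIV_ROUND_UP(4096, 64) = 64
 * longs are needed, so size_in_bytes = 8 * 64 = 512 bytes; max_count =
 * 4097 would round up to 65 longs (520 bytes).
 */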
enum _ecore_status_t ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
					      struct ecore_bmap *bmap,
					      u32 *id_num)
{
	*id_num = OSAL_FIND_FIRST_ZERO_BIT(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return ECORE_INVAL;

	OSAL_SET_BIT(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return ECORE_SUCCESS;
}
void ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
		       struct ecore_bmap *bmap,
		       u32 id_num)
{
	if (id_num >= bmap->max_count) {
		DP_NOTICE(p_hwfn, true,
			  "%s bitmap: cannot set id %d max is %d\n",
			  bmap->name, id_num, bmap->max_count);
		return;
	}

	OSAL_SET_BIT(id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: set id %d\n",
		   bmap->name, id_num);
}
void ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
			   struct ecore_bmap *bmap,
			   u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = OSAL_TEST_AND_CLEAR_BIT(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, false, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}
int ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
		       struct ecore_bmap *bmap,
		       u32 id_num)
{
	if (id_num >= bmap->max_count) {
		DP_NOTICE(p_hwfn, true,
			  "%s bitmap: id %d too high. max is %d\n",
			  bmap->name, id_num, bmap->max_count);
		return -1;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: tested id %d\n",
		   bmap->name, id_num);

	return OSAL_TEST_BIT(id_num, bmap->bitmap);
}
static bool ecore_bmap_is_empty(struct ecore_bmap *bmap)
{
	return (bmap->max_count ==
		OSAL_FIND_FIRST_BIT(bmap->bitmap, bmap->max_count));
}
u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id)
{
	/* first sb id for RoCE is after all the l2 sb */
	return FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE) + rel_sb_id;
}

u32 ecore_rdma_query_cau_timer_res(void)
{
	return ECORE_CAU_DEF_RX_TIMER_RES;
}
enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rdma_info *p_rdma_info;

	p_rdma_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info));
	if (!p_rdma_info) {
		DP_NOTICE(p_hwfn, false,
			  "ecore rdma alloc failed: cannot allocate memory (rdma info).\n");
		return ECORE_NOMEM;
	}
	p_hwfn->p_rdma_info = p_rdma_info;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_rdma_info->lock)) {
		ecore_rdma_info_free(p_hwfn);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_rdma_info->lock);

	return ECORE_SUCCESS;
}
void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_rdma_info->lock);
#endif
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = OSAL_NULL;
}
static enum _ecore_status_t ecore_rdma_inc_ref_cnt(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc = ECORE_INVAL;

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	if (p_hwfn->p_rdma_info->active) {
		p_hwfn->p_rdma_info->ref_cnt++;
		rc = ECORE_SUCCESS;
	} else {
		DP_INFO(p_hwfn, "Ref cnt requested for inactive rdma\n");
	}
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	return rc;
}

static void ecore_rdma_dec_ref_cnt(struct ecore_hwfn *p_hwfn)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	p_hwfn->p_rdma_info->ref_cnt--;
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}

static void ecore_rdma_activate(struct ecore_hwfn *p_hwfn)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	p_hwfn->p_rdma_info->active = true;
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
/* Part of deactivating rdma is letting all the relevant flows complete before
 * we start shutting down: Currently query-stats, which can be called from MCP
 * context.
 */
/* The longest time it can take an rdma flow to complete */
#define ECORE_RDMA_MAX_FLOW_TIME (100)
static enum _ecore_status_t ecore_rdma_deactivate(struct ecore_hwfn *p_hwfn)
{
	int wait_count;

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	p_hwfn->p_rdma_info->active = false;
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	/* We'll give each flow its time to complete... */
	wait_count = p_hwfn->p_rdma_info->ref_cnt;

	while (p_hwfn->p_rdma_info->ref_cnt) {
		OSAL_MSLEEP(ECORE_RDMA_MAX_FLOW_TIME);
		if (--wait_count == 0) {
			DP_NOTICE(p_hwfn, false,
				  "Timeout on refcnt=%d\n",
				  p_hwfn->p_rdma_info->ref_cnt);
			return ECORE_TIMEOUT;
		}
	}
	return ECORE_SUCCESS;
}
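/* Worked example: if five flows held references when rdma was marked
 * inactive (ref_cnt == 5), the loop above waits at most 5 * 100ms = 500ms
 * before giving up and returning ECORE_TIMEOUT.
 */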
static enum _ecore_status_t ecore_rdma_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocating RDMA\n");

	if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_IWARP)
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = ecore_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
						 OSAL_NULL);

	if (IS_IWARP(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2;

	/* INTERNAL: RoCE & iWARP use the same taskid */
	num_tasks = ecore_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->dev));
	if (!p_rdma_info->dev) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
			  rc);
		return rc;
	}

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->port));
	if (!p_rdma_info->port) {
		DP_NOTICE(p_hwfn, false,
			  "ecore rdma alloc failed: cannot allocate memory (rdma info port)\n");
		return ECORE_NOMEM;
	}

	/* Allocate bit map for pd's */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				   "PD");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		return rc;
	}

	/* Allocate bit map for XRC Domains */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				   ECORE_RDMA_MAX_XRCDS, "XRCD");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate xrcd_map, rc = %d\n",
			   rc);
		return rc;
	}

	/* Allocate DPI bitmap */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				   p_hwfn->dpi_count, "DPI");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded to
	 * twice the number of QPs.
	 */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
				   num_cons, "CQ");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * The maximum number of CQs is bounded to the number of connections we
	 * support. (num_qps in iWARP or num_qps/2 in RoCE).
	 */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				   num_cons, "Toggle");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for itids */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				   p_rdma_info->num_mrs, "MR");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for qps. */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->qp_map,
				   p_rdma_info->num_qps, "QP");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate qp bitmap, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				   "REAL CID");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		return rc;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start from an offset equal to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = (u16)ecore_cxt_get_xrc_srq_count(p_hwfn);
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrc_srq_map,
				   p_rdma_info->srq_id_offset, "XRC SRQ");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		return rc;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = ecore_cxt_get_srq_count(p_hwfn);
	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				   p_rdma_info->num_srqs,
				   "SRQ");
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		return rc;
	}

	if (IS_IWARP(p_hwfn))
		rc = ecore_iwarp_alloc(p_hwfn);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	return rc;
}
void ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
			  struct ecore_bmap *bmap,
			  bool check)
{
	int weight, line, item, last_line, last_item;
	u64 *pmap;

	if (!bmap || !bmap->bitmap)
		return;

	if (!check)
		goto end;

	weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
	if (!weight)
		goto end;

	DP_NOTICE(p_hwfn, false,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	pmap = (u64 *)bmap->bitmap;
	last_line = bmap->max_count / (64 * 8);
	last_item = last_line * 8 + (((bmap->max_count % (64 * 8)) + 63) / 64);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8) {
		if (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn, false,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line, (unsigned long long)pmap[item],
				  (unsigned long long)pmap[item + 1],
				  (unsigned long long)pmap[item + 2],
				  (unsigned long long)pmap[item + 3],
				  (unsigned long long)pmap[item + 4],
				  (unsigned long long)pmap[item + 5],
				  (unsigned long long)pmap[item + 6],
				  (unsigned long long)pmap[item + 7]);
	}

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item],
				bmap->max_count - item * 64))) {
		u8 str_last_line[200] = { 0 };
		int offset;

		offset = OSAL_SPRINTF(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++) {
			offset += OSAL_SPRINTF(str_last_line + offset,
					       "0x%016llx ",
					       (unsigned long long)pmap[item]);
		}
		DP_NOTICE(p_hwfn, false, "%s\n", str_last_line);
	}

end:
	OSAL_FREE(p_hwfn->p_dev, bmap->bitmap);
	bmap->bitmap = OSAL_NULL;
}
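/* Worked example of the dump math above: for max_count = 600 bits,
 * last_line = 600 / 512 = 1 full 512-bit line, and the tail holds
 * 600 - 512 = 88 bits, so last_item = 1 * 8 + (88 + 63) / 64 = 10
 * u64 words in total.
 */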
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn)
{
	if (IS_IWARP(p_hwfn))
		ecore_iwarp_resc_free(p_hwfn);

	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->qp_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->port);
	p_hwfn->p_rdma_info->port = OSAL_NULL;

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->dev);
	p_hwfn->p_rdma_info->dev = OSAL_NULL;
}
static OSAL_INLINE void ecore_rdma_free_reserved_lkey(struct ecore_hwfn *p_hwfn)
{
	ecore_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void ecore_rdma_free_ilt(struct ecore_hwfn *p_hwfn)
{
	/* Free Connection CXT */
	ecore_cxt_free_ilt_range(
		p_hwfn, ECORE_ELEM_CXT,
		ecore_cxt_get_proto_cid_start(p_hwfn,
					      p_hwfn->p_rdma_info->proto),
		ecore_cxt_get_proto_cid_count(p_hwfn,
					      p_hwfn->p_rdma_info->proto,
					      OSAL_NULL));

	/* Free Task CXT (intentionally RoCE, as the task-id is shared between
	 * RoCE and iWARP)
	 */
	ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
				 ecore_cxt_get_proto_tid_count(
					 p_hwfn, PROTOCOLID_ROCE));

	/* Free TSDM CXT */
	ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
				 ecore_cxt_get_srq_count(p_hwfn));
}

static void ecore_rdma_free(struct ecore_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");

	ecore_rdma_free_reserved_lkey(p_hwfn);

	ecore_rdma_resc_free(p_hwfn);

	ecore_rdma_free_ilt(p_hwfn);
}
static void ecore_rdma_get_guid(struct ecore_hwfn *p_hwfn, u8 *guid)
{
	u8 mac_addr[6];

	OSAL_MEMCPY(&mac_addr[0], &p_hwfn->hw_info.hw_mac_addr[0], ETH_ALEN);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
}
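/* Worked example: this is the standard EUI-64 expansion of the port MAC.
 * For MAC a4:b1:c2:d3:e4:f5 the assignments above yield the GUID
 * a6:b1:c2:ff:fe:d3:e4:f5 - 0xff,0xfe is inserted in the middle and the
 * universal/local bit (bit 1 of the first byte) is flipped by "^ 2".
 */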
static void ecore_rdma_init_events(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_start_in_params *params)
{
	struct ecore_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}
static void ecore_rdma_init_devinfo(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_start_in_params *params)
{
	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	u32 pci_status_control;

	/* Vendor specific information */
	dev->vendor_id = p_hwfn->p_dev->vendor_id;
	dev->vendor_part_id = p_hwfn->p_dev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = STORM_FW_VERSION;

	ecore_rdma_get_guid(p_hwfn, (u8 *)(&dev->sys_image_guid));
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = OSAL_MIN_T(u32, RDMA_MAX_SGE_PER_SQ_WQE,
				  RDMA_MAX_SGE_PER_RQ_WQE);

	if (p_hwfn->p_dev->rdma_max_sge) {
		dev->max_sge = OSAL_MIN_T(u32,
					  p_hwfn->p_dev->rdma_max_sge,
					  dev->max_sge);
	}

	/* Set these values according to configuration.
	 * MAX SGE for SRQ is not defined by FW for now,
	 * so define it in the driver.
	 * TODO: Get this value from FW.
	 */
	dev->max_srq_sge = ECORE_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->p_dev->rdma_max_srq_sge) {
		dev->max_srq_sge = OSAL_MIN_T(u32,
					      p_hwfn->p_dev->rdma_max_srq_sge,
					      dev->max_srq_sge);
	}

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_inline = (p_hwfn->p_dev->rdma_max_inline) ?
			  OSAL_MIN_T(u32,
				     p_hwfn->p_dev->rdma_max_inline,
				     dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = ECORE_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ);

	/* The number of QPs may be higher than ECORE_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within ecore cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	dev->max_qp = OSAL_MIN_T(u64, ROCE_MAX_QPS,
				 p_hwfn->p_rdma_info->num_qps);

	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = dev->max_qp * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = ECORE_RDMA_MAX_MR_SIZE;
	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in a two layer pbl,
	 * 8 is the pointer size in bytes,
	 * 32 is the size of a cq element in bytes
	 */
	if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = ECORE_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = ECORE_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = ECORE_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (OSAL_PAGE_SIZE / 8) * (OSAL_PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * OSAL_PAGE_SIZE;
	dev->max_pkey = ECORE_RDMA_MAX_P_KEY;
	/* Right now we don't take any parameters from the user,
	 * so assign the predefined max_srq to num_srqs.
	 */
	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;

	/* SRQ WQE size */
	dev->max_srq_wr = ECORE_RDMA_MAX_SRQ_WQE_ELEM;

	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;

	dev->max_dev_resp_rd_atomic_resc =
		dev->max_qp_resp_rd_atomic_resc * p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = ECORE_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = ECORE_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = dev->max_qp;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, ECORE_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	OSAL_PCI_READ_CONFIG_DWORD(p_hwfn->p_dev,
				   PCICFG_DEVICE_STATUS_CONTROL_2,
				   &pci_status_control);

	if (pci_status_control &
	    PCICFG_DEVICE_STATUS_CONTROL_2_ATOMIC_REQ_ENABLE)
		SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (IS_IWARP(p_hwfn))
		ecore_iwarp_init_devinfo(p_hwfn);
}
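/* Worked illustration of the rd_atomic sizing above (the constants here
 * are assumptions for the example, not taken from the HSI headers): with
 * a 4KB RDMA_RING_PAGE_SIZE and a 16-byte response element, a QP
 * responder gets 4096 / (16 * 2) = 128 outstanding RD/atomic resources,
 * and the device-wide budget is that per-QP figure times num_qps.
 */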
static void ecore_rdma_init_port(
	struct ecore_hwfn *p_hwfn)
{
	struct ecore_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   ECORE_RDMA_PORT_UP : ECORE_RDMA_PORT_DOWN;

	port->max_msg_size = OSAL_MIN_T(u64,
					(dev->max_mr_mw_fmr_size *
					 p_hwfn->p_dev->rdma_max_sge),
					((u64)1) << 31);

	port->pkey_bad_counter = 0;
}
static enum _ecore_status_t ecore_rdma_init_hw(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (IS_IWARP(p_hwfn))
		return ecore_iwarp_init_hw(p_hwfn, p_ptt);

	ecore_wr(p_hwfn,
		 p_ptt,
		 PRS_REG_ROCE_DEST_QP_MAX_PF,
		 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* We delay writing to this reg until first cid is allocated. See
	 * ecore_cxt_dynamic_ilt_alloc function for more details
	 */

	ll2_ethertype_en = ecore_rd(p_hwfn,
				    p_ptt,
				    PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
		 (ll2_ethertype_en | 0x01));

#ifndef REAL_ASIC_ONLY
	if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
		ecore_wr(p_hwfn,
			 p_ptt,
			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
			 0);
		ecore_wr(p_hwfn,
			 p_ptt,
			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 4,
			 0);
	}
#endif

	if (ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn,
			  true,
			  "The first RoCE's cid should be even\n");
		return ECORE_UNKNOWN_ERROR;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW - Done\n");
	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
#ifdef CONFIG_DCQCN
		    struct ecore_ptt *p_ptt,
#else
		    struct ecore_ptt OSAL_UNUSED *p_ptt,
#endif
		    struct ecore_rdma_start_in_params *params)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct rdma_init_func_hdr *pheader;
	struct ecore_rdma_info *p_rdma_info;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u16 igu_sb_id, sb_id;
	u8 ll2_queue_id;
	u32 cnq_id;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Starting FW\n");

	p_rdma_info = p_hwfn->p_rdma_info;

	/* Save the number of cnqs for the function close ramrod */
	p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				   p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (IS_IWARP(p_hwfn)) {
		ecore_iwarp_init_fw_ramrod(p_hwfn,
					   &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
#ifdef CONFIG_DCQCN
		rc = ecore_roce_dcqcn_cfg(p_hwfn, &params->roce.dcqcn_params,
					  &p_ent->ramrod.roce_init_func, p_ptt);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to configure DCQCN. rc = %d.\n", rc);
			return rc;
		}
#endif
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

		/* The ll2_queue_id is used only for UD QPs */
		ll2_queue_id = ecore_ll2_handle_to_queue_id(
			p_hwfn, params->roce.ll2_handle);
		p_ent->ramrod.roce_init_func.roce.ll2_queue_id = ll2_queue_id;
	}

	pheader = &p_ramrod->params_header;
	pheader->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
	pheader->num_cnqs = params->desired_cnq;

	/* The first SRQ ILT page is used for XRC SRQs and all the following
	 * pages contain regular SRQs. Hence the first regular SRQ ID is the
	 * maximum number of XRC SRQs.
	 */
	pheader->first_reg_srq_id = p_rdma_info->srq_id_offset;
	pheader->reg_srq_base_addr =
		ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);

	if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_16_BITS)
		pheader->cq_ring_mode = 1; /* 1=16 bits */
	else
		pheader->cq_ring_mode = 0; /* 0=32 bits */

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = (u16)OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id);
		igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num =
			OSAL_CPU_TO_LE16(igu_sb_id);

		p_ramrod->cnq_params[cnq_id].sb_index =
			p_hwfn->pf_params.rdma_pf_params.gl_pi;

		p_ramrod->cnq_params[cnq_id].num_pbl_pages =
			params->cnq_pbl_list[cnq_id].num_pbl_pages;

		p_ramrod->cnq_params[cnq_id].pbl_base_addr.hi =
			DMA_HI_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
		p_ramrod->cnq_params[cnq_id].pbl_base_addr.lo =
			DMA_LO_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);

		/* we arbitrarily decide that cnq_id will be as qz_offset */
		p_ramrod->cnq_params[cnq_id].queue_zone_num =
			OSAL_CPU_TO_LE16(p_rdma_info->queue_zone_base + cnq_id);
	}

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	return rc;
}
enum _ecore_status_t ecore_rdma_alloc_tid(void *rdma_cxt,
					  u32 *itid)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID\n");

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
				      &p_hwfn->p_rdma_info->tid_map,
				      itid);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed in allocating tid\n");
		goto out;
	}

	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}
static OSAL_INLINE enum _ecore_status_t ecore_rdma_reserve_lkey(
		struct ecore_hwfn *p_hwfn)
{
	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	ecore_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn, true,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_rdma_setup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				struct ecore_rdma_start_in_params *params)
{
	enum _ecore_status_t rc = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA setup\n");

	ecore_rdma_init_devinfo(p_hwfn, params);
	ecore_rdma_init_port(p_hwfn);
	ecore_rdma_init_events(p_hwfn, params);

	rc = ecore_rdma_reserve_lkey(p_hwfn);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_rdma_init_hw(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (IS_IWARP(p_hwfn)) {
		rc = ecore_iwarp_setup(p_hwfn, params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else {
		rc = ecore_roce_setup(p_hwfn);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return ecore_rdma_start_fw(p_hwfn, p_ptt, params);
}
enum _ecore_status_t ecore_rdma_stop(void *rdma_cxt)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	struct ecore_ptt *p_ptt;
	u32 ll2_ethertype_en;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop\n");

	rc = ecore_rdma_deactivate(p_hwfn);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

#ifdef CONFIG_DCQCN
	ecore_roce_stop_rl(p_hwfn);
#endif

	/* Disable RoCE search */
	ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	ecore_wr(p_hwfn,
		 p_ptt,
		 PRS_REG_ROCE_DEST_QP_MAX_PF,
		 0);

	ll2_ethertype_en = ecore_rd(p_hwfn,
				    p_ptt,
				    PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
		 (ll2_ethertype_en & 0xFFFE));

#ifndef REAL_ASIC_ONLY
	/* INTERNAL: In CMT mode, re-initialize nig to direct packets to both
	 * engines for L2 performance. RoCE requires all traffic to go just to
	 * engine 0.
	 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
		DP_ERR(p_hwfn->p_dev,
		       "On Everest 4 Big Bear Board revision A0 when RoCE driver is loaded L2 performance is sub-optimal (all traffic is routed to engine 0). For optimal L2 results either remove RoCE driver or use board revision B0\n");

		ecore_wr(p_hwfn,
			 p_ptt,
			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
			 0x55555555);
		ecore_wr(p_hwfn,
			 p_ptt,
			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
			 0x55555555);
	}
#endif

	if (IS_IWARP(p_hwfn)) {
		rc = ecore_iwarp_stop(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		rc = ecore_roce_stop(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

out:
	ecore_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}
enum _ecore_status_t ecore_rdma_add_user(void *rdma_cxt,
			struct ecore_rdma_add_user_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				      &returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "Failed in allocating dpi\n");

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)(osal_int_ptr_t)((u8 OSAL_IOMEM*)p_hwfn->doorbells +
						     dpi_start_offset +
						     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset +
				    out_params->dpi * p_hwfn->dpi_size;

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}
struct ecore_rdma_port *ecore_rdma_query_port(void *rdma_cxt)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_rdma_port *p_port = p_hwfn->p_rdma_info->port;
	struct ecore_mcp_link_state *p_link_output;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA Query port\n");

	/* The link state is saved only for the leading hwfn */
	p_link_output =
		&ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	/* Link may have changed... */
	p_port->port_state = p_link_output->link_up ? ECORE_RDMA_PORT_UP
						    : ECORE_RDMA_PORT_DOWN;

	p_port->link_speed = p_link_output->speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

struct ecore_rdma_device *ecore_rdma_query_device(void *rdma_cxt)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}
void ecore_rdma_free_tid(void *rdma_cxt,
			 u32 itid)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", itid);

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn,
			      &p_hwfn->p_rdma_info->tid_map,
			      itid);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct ecore_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn, false,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_WMB(p_hwfn->p_dev);
}
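/* Worked example (values illustrative): with queue_zone_base = 0x100 and
 * qz_offset = 2, the producer is written to the USTORM RAM slot for queue
 * zone 0x102 - each CNQ was bound to queue zone base + cnq_id in
 * ecore_rdma_start_fw above.
 */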
enum _ecore_status_t ecore_rdma_alloc_pd(void *rdma_cxt,
					 u16 *pd)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	u32 returned_id;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
				      &p_hwfn->p_rdma_info->pd_map,
				      &returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "Failed in allocating pd id\n");

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

void ecore_rdma_free_pd(void *rdma_cxt,
			u16 pd)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
enum _ecore_status_t ecore_rdma_alloc_xrcd(void *rdma_cxt,
					   u16 *xrcd_id)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	u32 returned_id;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD\n");

	/* Allocates an unused XRC domain */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
				      &p_hwfn->p_rdma_info->xrcd_map,
				      &returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "Failed in allocating xrcd id\n");

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

void ecore_rdma_free_xrcd(void *rdma_cxt,
			  u16 xrcd_id)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	/* Returns a previously allocated XRC domain for reuse */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
static enum ecore_rdma_toggle_bit
ecore_rdma_toggle_bit_create_resize_cq(struct ecore_hwfn *p_hwfn,
				       u16 icid)
{
	struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum ecore_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - ecore_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	OSAL_SPIN_LOCK(&p_info->lock);
	toggle_bit = !OSAL_TEST_AND_FLIP_BIT(bmap_id, p_info->toggle_bits.bitmap);
	OSAL_SPIN_UNLOCK(&p_info->lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}
enum _ecore_status_t ecore_rdma_create_cq(void *rdma_cxt,
			struct ecore_rdma_create_cq_in_params *params,
			u16 *icid)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum ecore_rdma_toggle_bit toggle_bit;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u32 returned_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	OSAL_SPIN_LOCK(&p_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	OSAL_SPIN_UNLOCK(&p_info->lock);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	*icid = (u16)(returned_id +
		      ecore_cxt_get_proto_cid_start(
			      p_hwfn, p_info->proto));

	/* Check if icid requires a page allocation */
	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *icid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_CREATE_CQ,
				   p_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = OSAL_CPU_TO_LE32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = OSAL_CPU_TO_LE32(params->cq_handle_lo);
	p_ramrod->dpi = OSAL_CPU_TO_LE16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = OSAL_CPU_TO_LE32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM)
			   + params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;
	/* INTERNAL: Two layer PBL is currently not supported, ignoring next line */
	/* INTERNAL: p_ramrod->pbl_log_page_size = params->pbl_page_size_log - 12; */

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		/* restore toggle bit */
		ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	OSAL_SPIN_LOCK(&p_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	OSAL_SPIN_UNLOCK(&p_info->lock);

	DP_NOTICE(p_hwfn, false, "Create CQ failed, rc = %d\n", rc);

	return rc;
}
enum _ecore_status_t ecore_rdma_destroy_cq(void *rdma_cxt,
			struct ecore_rdma_destroy_cq_in_params *in_params,
			struct ecore_rdma_destroy_cq_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum _ecore_status_t rc = ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res = (struct rdma_destroy_cq_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
					sizeof(struct rdma_destroy_cq_output_params));
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn, false,
			  "ecore destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send destroy CQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_DESTROY_CQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->num_cq_notif =
		OSAL_LE16_TO_CPU(p_ramrod_res->cnq_num);

	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(struct rdma_destroy_cq_output_params));

	/* Free icid */
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);

	ecore_bmap_release_id(p_hwfn,
			      &p_hwfn->p_rdma_info->cq_map,
			      (in_params->icid - ecore_cxt_get_proto_cid_start(
				      p_hwfn, p_hwfn->p_rdma_info->proto)));

	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(struct rdma_destroy_cq_output_params));

	return rc;
}
void ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac)
{
	p_fw_mac[0] = OSAL_CPU_TO_LE16((p_ecore_mac[0] << 8) + p_ecore_mac[1]);
	p_fw_mac[1] = OSAL_CPU_TO_LE16((p_ecore_mac[2] << 8) + p_ecore_mac[3]);
	p_fw_mac[2] = OSAL_CPU_TO_LE16((p_ecore_mac[4] << 8) + p_ecore_mac[5]);
}
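/* Worked example: for MAC 00:11:22:33:44:55 the three 16-bit words are
 * 0x0011, 0x2233 and 0x4455, each converted to little-endian for the FW.
 */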
enum _ecore_status_t ecore_rdma_query_qp(void *rdma_cxt,
			struct ecore_rdma_qp *qp,
			struct ecore_rdma_query_qp_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (IS_IWARP(p_hwfn))
		rc = ecore_iwarp_query_qp(qp, out_params);
	else
		rc = ecore_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}
enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
			struct ecore_rdma_qp *qp,
			struct ecore_rdma_destroy_qp_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!rdma_cxt || !qp) {
		DP_ERR(p_hwfn,
		       "ecore rdma destroy qp failed: invalid NULL input. rdma_cxt=%p, qp=%p\n",
		       rdma_cxt, qp);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)\n", qp->icid);

	if (IS_IWARP(p_hwfn))
		rc = ecore_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = ecore_roce_destroy_qp(p_hwfn, qp, out_params);

	/* free qp params struct */
	OSAL_FREE(p_hwfn->p_dev, qp);

	return rc;
}
struct ecore_rdma_qp *ecore_rdma_create_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_in_params *in_params,
			struct ecore_rdma_create_qp_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_rdma_qp *qp;
	u8 max_stats_queues;
	enum _ecore_status_t rc = 0;

	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
		       rdma_cxt, in_params, out_params);
		return OSAL_NULL;
	}

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return OSAL_NULL;
	}

	if (IS_IWARP(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->p_dev, true, "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return OSAL_NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->p_dev, true,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return OSAL_NULL;
		}
	}

	qp = OSAL_ZALLOC(p_hwfn->p_dev,
			 GFP_KERNEL,
			 sizeof(struct ecore_rdma_qp));
	if (!qp) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate ecore_rdma_qp\n");
		return OSAL_NULL;
	}

	qp->cur_state = ECORE_ROCE_QP_STATE_RESET;
#ifdef CONFIG_ECORE_IWARP
	qp->iwarp_state = ECORE_IWARP_QP_STATE_IDLE;
#endif
	qp->qp_handle.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_hi);
	qp->qp_handle.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	/* e2e_flow_control cannot be done in case of S-RQ.
	 * Refer to 9.7.7.2 End-to-End Flow Control section of IB spec
	 */
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (IS_IWARP(p_hwfn)) {
		rc = ecore_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp->qp_idx);
		qp->icid = ECORE_ROCE_QP_TO_ICID(qp->qp_idx);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc != ECORE_SUCCESS) {
		OSAL_FREE(p_hwfn->p_dev, qp);
		return OSAL_NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	/* INTERNAL: max_sq_sges future use only */

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}
#define ECORE_RDMA_ECN_SHIFT 0
#define ECORE_RDMA_ECN_MASK 0x3
#define ECORE_RDMA_DSCP_SHIFT 2
#define ECORE_RDMA_DSCP_MASK 0x3f
#define ECORE_RDMA_VLAN_PRIO_SHIFT 13
#define ECORE_RDMA_VLAN_PRIO_MASK 0x7
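/* Field layout illustration: with the masks above, a vlan_id of 0x0123
 * carries priority (0x0123 >> 13) & 0x7 = 0, and a traffic_class_tos of
 * 0x4B carries ECN = 0x4B & 0x3 = 3 and DSCP = (0x4B >> 2) & 0x3f = 0x12.
 */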
enum _ecore_status_t ecore_rdma_modify_qp(
	void *rdma_cxt,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_modify_qp_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	enum ecore_roce_qp_state prev_state;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE)) {
		qp->roce_mode = params->roce_mode;
	}
	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY)) {
		qp->pkey = params->pkey;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) {
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_DEST_QP)) {
		qp->dest_qp = params->dest_qp;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = params->udp_src_port;
		qp->vlan_id = params->vlan_id;
		qp->traffic_class_tos = params->traffic_class_tos;

		/* apply global override values */
		if (p_hwfn->p_rdma_info->glob_cfg.vlan_pri_en)
			SET_FIELD(qp->vlan_id, ECORE_RDMA_VLAN_PRIO,
				  p_hwfn->p_rdma_info->glob_cfg.vlan_pri);

		if (p_hwfn->p_rdma_info->glob_cfg.ecn_en)
			SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_ECN,
				  p_hwfn->p_rdma_info->glob_cfg.ecn);

		if (p_hwfn->p_rdma_info->glob_cfg.dscp_en)
			SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_DSCP,
				  p_hwfn->p_rdma_info->glob_cfg.dscp);
	}

	qp->mtu = params->mtu;

	OSAL_MEMCPY((u8 *)&qp->remote_mac_addr[0],
		    (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
	if (params->use_local_mac) {
		OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
			    (u8 *)&params->local_mac_addr[0],
			    ETH_ALEN);
	} else {
		OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
			    (u8 *)&p_hwfn->hw_info.hw_mac_addr,
			    ETH_ALEN);
	}

	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN)) {
		qp->rq_psn = params->rq_psn;
	}
	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN)) {
		qp->sq_psn = params->sq_psn;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) {
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) {
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) {
		qp->ack_timeout = params->ack_timeout;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT)) {
		qp->retry_cnt = params->retry_cnt;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) {
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) {
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
	}

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI) {
		qp->has_req = true;
	} else if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT)
		qp->has_resp = true;

	if (IS_IWARP(p_hwfn)) {
		enum ecore_iwarp_qp_state new_state =
			ecore_roce2iwarp_state(qp->cur_state);

		rc = ecore_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = ecore_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}
enum _ecore_status_t ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid) {
		p_hwfn->p_rdma_info->last_tid = params->itid;
	}

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
		  params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR,
		  params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == ECORE_RDMA_TID_FMR) &&
	    !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
		  params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
		  params->dma_mr);

	switch (params->tid_type) {
	case ECORE_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case ECORE_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case ECORE_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case ECORE_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = ECORE_INVAL;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
		  tid_type);

	p_ramrod->itid = OSAL_CPU_TO_LE32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = OSAL_CPU_TO_LE16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = OSAL_CPU_TO_LE32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, true, "fw_return_code = %d\n", fw_return_code);
		return ECORE_UNKNOWN_ERROR;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}
static OSAL_INLINE int ecore_rdma_send_deregister_tid_ramrod(
		struct ecore_hwfn *p_hwfn,
		u32 itid,
		u8 *fw_return_code)
{
	struct ecore_sp_init_data init_data;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_DEREGISTER_MR,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = OSAL_CPU_TO_LE32(itid);

	rc = ecore_spq_post(p_hwfn, p_ent, fw_return_code);
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	return rc;
}
#define ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC	(1)

enum _ecore_status_t ecore_rdma_deregister_tid(void *rdma_cxt,
					       u32 itid)
{
	enum _ecore_status_t rc;
	u8 fw_ret_code;
	struct ecore_ptt *p_ptt;
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	/* First attempt */
	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
		goto done;

	/* Second attempt, after 1msec, if device still holds data.
	 * This can occur since 'destroy QP' returns to the caller rather fast.
	 * The synchronous part of it returns after freeing a few of the
	 * resources but not all of them, allowing the consumer to continue its
	 * flow. All of the resources will be freed after the asynchronous part
	 * of the destroy QP is complete.
	 */
	OSAL_MSLEEP(ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC);
	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
		goto done;

	/* Third and last attempt, perform NIG drain and resend the ramrod */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_TIMEOUT;

	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		ecore_ptt_release(p_hwfn, p_ptt);
		return rc;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

done:
	if (fw_ret_code == RDMA_RETURN_OK) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "De-registered itid=%d\n",
			   itid);
		return ECORE_SUCCESS;
	} else if (fw_ret_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		/* INTERNAL: This error is returned in case trying to deregister
		 * a MR that is not allocated. We define "allocated" as either:
		 * 1. Registered.
		 * 2. This is an FMR MR type, which is not currently registered
		 *    but can accept FMR WQEs on SQ.
		 */
		DP_NOTICE(p_hwfn, false, "itid=%d, fw_ret_code=%d\n", itid,
			  fw_ret_code);
		return ECORE_INVAL;
	} else { /* fw_ret_code == RDMA_RETURN_NIG_DRAIN_REQ */
		DP_NOTICE(p_hwfn, true,
			  "deregister failed after three attempts. itid=%d, fw_ret_code=%d\n",
			  itid, fw_ret_code);
		return ECORE_UNKNOWN_ERROR;
	}
}
static struct ecore_bmap *ecore_rdma_get_srq_bmap(struct ecore_hwfn *p_hwfn, bool is_xrc)
{
	if (is_xrc)
		return &p_hwfn->p_rdma_info->xrc_srq_map;

	return &p_hwfn->p_rdma_info->srq_map;
}
u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc)
{
	/* XRC SRQs use their relative id directly; regular SRQ ids are
	 * offset past the XRC range.
	 */
	if (is_xrc)
		return id;

	return id + p_hwfn->p_rdma_info->srq_id_offset;
}
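/*
 * Worked example (values assumed for illustration): if the device reserves
 * the first srq_id_offset entries of the FW SRQ id space for XRC SRQs,
 * e.g. srq_id_offset == 1024, then XRC SRQ relative id 5 maps to FW id 5,
 * while a regular SRQ relative id 5 maps to FW id 1029. The offset itself
 * is established when the SRQ bitmaps are allocated.
 */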
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_modify_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send modify SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_MODIFY_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_modify_srq;

	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
	p_ramrod->wqe_limit = OSAL_CPU_TO_LE16(in_params->wqe_limit);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_destroy_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	struct ecore_bmap *bmap;
	enum _ecore_status_t rc;

	opaque_fid = p_hwfn->hw_info.opaque_fid;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send destroy SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_DESTROY_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;

	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_create_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	enum ecore_cxt_elem_type elem_type;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	struct ecore_bmap *bmap;
	u32 returned_id;
	enum _ecore_status_t rc;

	/* Allocate XRC/SRQ ID */
	bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
			  in_params->is_xrc);
		return rc;
	}

	/* Allocate XRC/SRQ ILT page */
	elem_type = (in_params->is_xrc) ? (ECORE_ELEM_XRC_SRQ) : (ECORE_ELEM_SRQ);
	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
	if (rc != ECORE_SUCCESS)
		goto err;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Create XRC/SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_CREATE_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_srq;

	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(in_params->pbl_base_addr);
	p_ramrod->pbl_base_addr.lo = DMA_LO_LE(in_params->pbl_base_addr);
	p_ramrod->pages_in_srq_pbl = OSAL_CPU_TO_LE16(in_params->num_pages);
	p_ramrod->pd_id = OSAL_CPU_TO_LE16(in_params->pd_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
	p_ramrod->page_size = OSAL_CPU_TO_LE16(in_params->page_size);
	p_ramrod->producers_addr.hi = DMA_HI_LE(in_params->prod_pair_addr);
	p_ramrod->producers_addr.lo = DMA_LO_LE(in_params->prod_pair_addr);
	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, (u16)returned_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);

	if (in_params->is_xrc) {
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG,
			  1);
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
			  in_params->reserved_key_en);
		p_ramrod->xrc_srq_cq_cid = OSAL_CPU_TO_LE32(in_params->cq_cid);
		p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(in_params->xrcd_id);
	}

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->srq_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "XRC/SRQ created Id = %x (is_xrc=%u)\n",
		   out_params->srq_id, in_params->is_xrc);
	return rc;

err:
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, bmap, returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	return rc;
}
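/*
 * Usage sketch (illustrative only): creating and later destroying a regular
 * (non-XRC) SRQ. "pbl_phys", "prod_phys" and "pd_id" are hypothetical
 * caller-provided values; remaining fields are elided.
 *
 *	struct ecore_rdma_create_srq_in_params in = { 0 };
 *	struct ecore_rdma_create_srq_out_params out = { 0 };
 *	struct ecore_rdma_destroy_srq_in_params destroy_in = { 0 };
 *
 *	in.pbl_base_addr = pbl_phys;
 *	in.prod_pair_addr = prod_phys;
 *	in.num_pages = 1;
 *	in.pd_id = pd_id;
 *	in.is_xrc = false;
 *	rc = ecore_rdma_create_srq(rdma_cxt, &in, &out);
 *	...
 *	destroy_in.srq_id = out.srq_id;
 *	destroy_in.is_xrc = false;
 *	rc = ecore_rdma_destroy_srq(rdma_cxt, &destroy_in);
 */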
bool ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn)
{
	bool result;

	/* if rdma info has not been allocated, naturally there are no qps */
	if (!p_hwfn->p_rdma_info)
		return false;

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	if (!p_hwfn->p_rdma_info->qp_map.bitmap)
		result = false;
	else
		result = !ecore_bmap_is_empty(&p_hwfn->p_rdma_info->qp_map);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	return result;
}
enum _ecore_status_t ecore_rdma_resize_cq(void *rdma_cxt,
			struct ecore_rdma_resize_cq_in_params *in_params,
			struct ecore_rdma_resize_cq_out_params *out_params)
{
	enum _ecore_status_t rc;
	enum ecore_rdma_toggle_bit toggle_bit;
	struct ecore_spq_entry *p_ent;
	struct rdma_resize_cq_ramrod_data *p_ramrod;
	u8 fw_return_code;
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	dma_addr_t ramrod_res_phys;
	struct rdma_resize_cq_output_params *p_ramrod_res;
	struct ecore_sp_init_data init_data;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);

	/* Send resize CQ ramrod */
	p_ramrod_res = (struct rdma_resize_cq_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
					sizeof(*p_ramrod_res));
	if (!p_ramrod_res) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore resize cq failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_RESIZE_CQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_resize_cq;

	p_ramrod->flags = 0;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn,
							    in_params->icid);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT,
		  toggle_bit);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
		  in_params->pbl_two_level);

	/* FW expects the PBL page size as a log2 of 4KB units */
	p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
	p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(in_params->pbl_num_pages);
	p_ramrod->max_cqes = OSAL_CPU_TO_LE32(in_params->cq_size);
	p_ramrod->pbl_addr.hi = DMA_HI_LE(in_params->pbl_ptr);
	p_ramrod->pbl_addr.lo = DMA_LO_LE(in_params->pbl_ptr);

	p_ramrod->output_params_addr.hi = DMA_HI_LE(ramrod_res_phys);
	p_ramrod->output_params_addr.lo = DMA_LO_LE(ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc != ECORE_SUCCESS)
		goto err;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, fw_return_code != RDMA_RETURN_RESIZE_CQ_ERR,
			  "fw_return_code = %d\n", fw_return_code);
		rc = ECORE_UNKNOWN_ERROR;
		goto err;
	}

	out_params->prod = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_prod);
	out_params->cons = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_cons);

	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	return rc;

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));
	DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);

	return rc;
}
enum _ecore_status_t ecore_rdma_start(void *rdma_cxt,
				      struct ecore_rdma_start_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == OSAL_NULL)
		goto err;

	rc = ecore_rdma_alloc(p_hwfn);
	if (rc)
		goto err1;

	rc = ecore_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	ecore_ptt_release(p_hwfn, p_ptt);

	ecore_rdma_activate(p_hwfn);
	return rc;

err2:
	ecore_rdma_free(p_hwfn);
err1:
	ecore_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}
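/*
 * Usage sketch (illustrative only): bringing RDMA up on a hwfn. The start
 * parameters here are hypothetical; real callers also fill in CNQ, MAC and
 * event-callback information before the call.
 *
 *	struct ecore_rdma_start_in_params start_params = { 0 };
 *
 *	start_params.desired_cnq = 1;
 *	rc = ecore_rdma_start(rdma_cxt, &start_params);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */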
enum _ecore_status_t ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
			       struct ecore_rdma_stats_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	u8 abs_stats_queue, max_stats_queues;
	u32 pstats_addr, tstats_addr, addr;
	struct ecore_rdma_info *info;
	struct ecore_ptt *p_ptt;
#ifdef CONFIG_ECORE_IWARP
	u32 xstats_addr;
#endif
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_hwfn->p_rdma_info) {
		DP_INFO(p_hwfn->p_dev, "ecore rdma query stats failed due to NULL rdma_info\n");
		return ECORE_INVAL;
	}

	info = p_hwfn->p_rdma_info;

	rc = ecore_rdma_inc_ref_cnt(p_hwfn);
	if (rc != ECORE_SUCCESS)
		return rc;

	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore rdma query stats failed due to invalid statistics queue %d. maximum is %d\n",
		       stats_queue, max_stats_queues);
		rc = ECORE_INVAL;
		goto err;
	}

	/* Statistics collected in statistics queues (for PF/VF) */
	abs_stats_queue = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
			  stats_queue;
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      PSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      TSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);

#ifdef CONFIG_ECORE_IWARP
	/* Statistics per PF ID */
	xstats_addr = BAR0_MAP_REG_XSDM_RAM +
		      XSTORM_IWARP_RXMIT_STATS_OFFSET(p_hwfn->rel_pf_id);
#endif

	OSAL_MEMSET(&info->rdma_sent_pstats, 0, sizeof(info->rdma_sent_pstats));
	OSAL_MEMSET(&info->rdma_rcv_tstats, 0, sizeof(info->rdma_rcv_tstats));
	OSAL_MEMSET(&info->roce.event_stats, 0, sizeof(info->roce.event_stats));
	OSAL_MEMSET(&info->roce.dcqcn_rx_stats, 0, sizeof(info->roce.dcqcn_rx_stats));
	OSAL_MEMSET(&info->roce.dcqcn_tx_stats, 0, sizeof(info->roce.dcqcn_tx_stats));
#ifdef CONFIG_ECORE_IWARP
	OSAL_MEMSET(&info->iwarp.stats, 0, sizeof(info->iwarp.stats));
#endif

	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		rc = ECORE_TIMEOUT;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		goto err;
	}

	ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_sent_pstats,
			  pstats_addr, sizeof(struct rdma_sent_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_rcv_tstats,
			  tstats_addr, sizeof(struct rdma_rcv_stats));

	addr = BAR0_MAP_REG_TSDM_RAM +
	       TSTORM_ROCE_EVENTS_STAT_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.event_stats, addr,
			  sizeof(struct roce_events_stats));

	addr = BAR0_MAP_REG_YSDM_RAM +
	       YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_rx_stats, addr,
			  sizeof(struct roce_dcqcn_received_stats));

	addr = BAR0_MAP_REG_PSDM_RAM +
	       PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_tx_stats, addr,
			  sizeof(struct roce_dcqcn_sent_stats));

#ifdef CONFIG_ECORE_IWARP
	ecore_memcpy_from(p_hwfn, p_ptt, &info->iwarp.stats,
			  xstats_addr, sizeof(struct iwarp_rxmit_stats_drv));
#endif

	ecore_ptt_release(p_hwfn, p_ptt);

	OSAL_MEMSET(out_params, 0, sizeof(*out_params));

	out_params->sent_bytes =
		HILO_64_REGPAIR(info->rdma_sent_pstats.sent_bytes);
	out_params->sent_pkts =
		HILO_64_REGPAIR(info->rdma_sent_pstats.sent_pkts);
	out_params->rcv_bytes =
		HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_bytes);
	out_params->rcv_pkts =
		HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_pkts);

	out_params->silent_drops =
		OSAL_LE16_TO_CPU(info->roce.event_stats.silent_drops);
	out_params->rnr_nacks_sent =
		OSAL_LE16_TO_CPU(info->roce.event_stats.rnr_naks_sent);
	out_params->icrc_errors =
		OSAL_LE32_TO_CPU(info->roce.event_stats.icrc_error_count);
	out_params->retransmit_events =
		OSAL_LE32_TO_CPU(info->roce.event_stats.retransmit_count);
	out_params->ecn_pkt_rcv =
		HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.ecn_pkt_rcv);
	out_params->cnp_pkt_rcv =
		HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.cnp_pkt_rcv);
	out_params->cnp_pkt_sent =
		HILO_64_REGPAIR(info->roce.dcqcn_tx_stats.cnp_pkt_sent);

#ifdef CONFIG_ECORE_IWARP
	out_params->iwarp_tx_fast_rxmit_cnt =
		HILO_64_REGPAIR(info->iwarp.stats.tx_fast_retransmit_event_cnt);
	out_params->iwarp_tx_slow_start_cnt =
		HILO_64_REGPAIR(
			info->iwarp.stats.tx_go_to_slow_start_event_cnt);
	out_params->unalign_rx_comp = info->iwarp.unalign_rx_comp;
#endif

err:
	ecore_rdma_dec_ref_cnt(p_hwfn);

	return rc;
}
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	unsigned long *bitmap;
	unsigned int nbits;

	if (!p_hwfn->p_rdma_info)
		return ECORE_INVAL;

	OSAL_MEMSET(out_params, 0, sizeof(*out_params));

	bitmap = p_hwfn->p_rdma_info->pd_map.bitmap;
	nbits = p_hwfn->p_rdma_info->pd_map.max_count;
	out_params->pd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_pd = nbits;

	bitmap = p_hwfn->p_rdma_info->dpi_map.bitmap;
	nbits = p_hwfn->p_rdma_info->dpi_map.max_count;
	out_params->dpi_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_dpi = nbits;

	bitmap = p_hwfn->p_rdma_info->cq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->cq_map.max_count;
	out_params->cq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_cq = nbits;

	bitmap = p_hwfn->p_rdma_info->qp_map.bitmap;
	nbits = p_hwfn->p_rdma_info->qp_map.max_count;
	out_params->qp_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_qp = nbits;

	bitmap = p_hwfn->p_rdma_info->tid_map.bitmap;
	nbits = p_hwfn->p_rdma_info->tid_map.max_count;
	out_params->tid_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_tid = nbits;

	bitmap = p_hwfn->p_rdma_info->srq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->srq_map.max_count;
	out_params->srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_srq = nbits;

	bitmap = p_hwfn->p_rdma_info->xrc_srq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->xrc_srq_map.max_count;
	out_params->xrc_srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_xrc_srq = nbits;

	bitmap = p_hwfn->p_rdma_info->xrcd_map.bitmap;
	nbits = p_hwfn->p_rdma_info->xrcd_map.max_count;
	out_params->xrcd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_xrcd = nbits;

	return ECORE_SUCCESS;
}
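/*
 * Usage sketch (illustrative only): snapshotting resource usage, e.g. for a
 * sysctl or debug print. "out" is a hypothetical local; the counter fields
 * are assumed 64-bit wide, hence the casts.
 *
 *	struct ecore_rdma_counters_out_params out;
 *
 *	if (ecore_rdma_query_counters(rdma_cxt, &out) == ECORE_SUCCESS)
 *		DP_INFO(p_hwfn, "qps %llu/%llu cqs %llu/%llu\n",
 *			(unsigned long long)out.qp_count,
 *			(unsigned long long)out.max_qp,
 *			(unsigned long long)out.cq_count,
 *			(unsigned long long)out.max_cq);
 */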
enum _ecore_status_t ecore_rdma_resize_cnq(void *rdma_cxt,
			struct ecore_rdma_resize_cnq_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cnq_id = %08x\n", params->cnq_id);

	/* @@@TBD: waiting for fw (there is no ramrod yet) */
	return ECORE_NOTIMPL;
}
void ecore_rdma_remove_user(void *rdma_cxt,
			    u16 dpi)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "dpi = %08x\n", dpi);

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *in_params,
			u32 glob_cfg_bits)
{
	struct ecore_rdma_glob_cfg glob_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_RDMA,
		   "dscp %d dscp en %d ecn %d ecn en %d vlan pri %d vlan_pri_en %d\n",
		   in_params->dscp, in_params->dscp_en,
		   in_params->ecn, in_params->ecn_en, in_params->vlan_pri,
		   in_params->vlan_pri_en);

	/* Read global cfg to local */
	OSAL_MEMCPY(&glob_cfg, &p_hwfn->p_rdma_info->glob_cfg,
		    sizeof(struct ecore_rdma_glob_cfg));

	if (glob_cfg_bits & ECORE_RDMA_DCSP_BIT_MASK) {
		if (in_params->dscp > MAX_DSCP) {
			DP_ERR(p_hwfn->p_dev, "invalid glob dscp %d\n",
			       in_params->dscp);
			return ECORE_INVAL;
		}
		glob_cfg.dscp = in_params->dscp;
	}

	if (glob_cfg_bits & ECORE_RDMA_DCSP_EN_BIT_MASK) {
		if (in_params->dscp_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob_dscp_en %d\n",
			       in_params->dscp_en);
			return ECORE_INVAL;
		}
		glob_cfg.dscp_en = in_params->dscp_en;
	}

	if (glob_cfg_bits & ECORE_RDMA_ECN_BIT_MASK) {
		if (in_params->ecn > INET_ECN_ECT_0) {
			DP_ERR(p_hwfn->p_dev, "invalid glob ecn %d\n",
			       in_params->ecn);
			return ECORE_INVAL;
		}
		glob_cfg.ecn = in_params->ecn;
	}

	if (glob_cfg_bits & ECORE_RDMA_ECN_EN_BIT_MASK) {
		if (in_params->ecn_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob ecn en %d\n",
			       in_params->ecn_en);
			return ECORE_INVAL;
		}
		glob_cfg.ecn_en = in_params->ecn_en;
	}

	if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_BIT_MASK) {
		if (in_params->vlan_pri > MAX_VLAN_PRIO) {
			DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri %d\n",
			       in_params->vlan_pri);
			return ECORE_INVAL;
		}
		glob_cfg.vlan_pri = in_params->vlan_pri;
	}

	if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK) {
		if (in_params->vlan_pri_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri en %d\n",
			       in_params->vlan_pri_en);
			return ECORE_INVAL;
		}
		glob_cfg.vlan_pri_en = in_params->vlan_pri_en;
	}

	/* Write back local cfg to global */
	OSAL_MEMCPY(&p_hwfn->p_rdma_info->glob_cfg, &glob_cfg,
		    sizeof(struct ecore_rdma_glob_cfg));

	return rc;
}
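/*
 * Usage sketch (illustrative only): enabling DSCP marking with an assumed
 * value of 24. Only the fields selected by the bit mask are validated and
 * applied; the rest of the global configuration is preserved.
 *
 *	struct ecore_rdma_glob_cfg cfg = { 0 };
 *
 *	cfg.dscp = 24;
 *	cfg.dscp_en = 1;
 *	rc = ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
 *				     ECORE_RDMA_DCSP_BIT_MASK |
 *				     ECORE_RDMA_DCSP_EN_BIT_MASK);
 */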
enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *out_params)
{
	OSAL_MEMCPY(out_params, &p_hwfn->p_rdma_info->glob_cfg,
		    sizeof(struct ecore_rdma_glob_cfg));

	return ECORE_SUCCESS;
}
#endif /* LINUX_REMOVE */